/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

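/* Bind or unbind a flow counter to a RIF in the given direction. The current
 * RITR register payload is queried first, so that only the counter fields are
 * updated and the rest of the RIF configuration is preserved.
 */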
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

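/* Allocate a counter for a RIF: take an index from the RIF counter sub-pool,
 * clear it via RICNT and bind it to the RIF with mlxsw_sp_rif_counter_edit().
 * On any failure the counter index is released back to the pool.
 */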
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

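/* Bitmap of prefix lengths in use by a FIB / LPM tree. It is sized for IPv6
 * (0..128), so MLXSW_SP_PREFIX_COUNT is 129 bits and covers IPv4 as well.
 */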
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

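/* Program the tree layout via RALST: the longest used prefix length becomes
 * the root bin and every other used prefix length (except 0) is packed as a
 * bin chained to the previously packed one, mirroring the loop below.
 */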
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

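/* Initialize the LPM tree array and reserve one (initially empty) tree per
 * protocol; these per-protocol trees are the defaults that newly created
 * FIBs are bound to in mlxsw_sp_fib_create().
 */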
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

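/* Set up a virtual router for the given kernel table ID: an unused VR is
 * claimed and an IPv4 FIB, an IPv6 FIB and an IPv4 multicast routing table
 * are created for it.
 */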
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr_table_create;
	}
	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr4_table = mr4_table;
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

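/* Switch every virtual router that is bound to the current per-protocol
 * default tree over to new_tree, then make new_tree the default and drop the
 * reference on the old one. On failure, already switched VRs are rolled back.
 */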
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

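/* Resolve the underlay device of an IP-in-IP overlay netdevice via the
 * tunnel's bound link (tun->parms.link), if any.
 */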
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

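/* Tie a FIB entry to an IPIP entry as its decap route: a tunnel index is
 * allocated from the KVD linear area and recorded on the entry, and the FIB
 * entry and IPIP entry are linked to each other.
 */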
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

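/* Promote a trap route to an IPIP decap entry: initialize the decap linkage
 * and reprogram the entry as MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; if the
 * hardware update fails, demote it back.
 */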
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

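/* An overlay (tunnel) netdevice was registered. If the tunnel can be
 * offloaded, and mlxsw_sp_ipip_demote_tunnel_by_saddr() reports no conflict
 * on the underlay source address and table, create an IPIP entry for it.
 */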
Petr Machata796ec772017-11-03 10:03:29 +01001344static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1345 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001346{
Petr Machata00635872017-10-16 16:26:37 +02001347 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001348 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001349 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001350 union mlxsw_sp_l3addr saddr;
1351 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001352
1353 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001354 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001355 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1356 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1357 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1358 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1359 saddr, ul_tb_id,
1360 NULL)) {
1361 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1362 ol_dev);
1363 if (IS_ERR(ipip_entry))
1364 return PTR_ERR(ipip_entry);
1365 }
Petr Machata00635872017-10-16 16:26:37 +02001366 }
1367
1368 return 0;
1369}
1370
Petr Machata796ec772017-11-03 10:03:29 +01001371static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1372 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001373{
1374 struct mlxsw_sp_ipip_entry *ipip_entry;
1375
1376 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1377 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001378 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001379}
1380
Petr Machata47518ca2017-11-03 10:03:35 +01001381static void
1382mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1383 struct mlxsw_sp_ipip_entry *ipip_entry)
1384{
1385 struct mlxsw_sp_fib_entry *decap_fib_entry;
1386
1387 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1388 if (decap_fib_entry)
1389 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1390 decap_fib_entry);
1391}
1392
Petr Machata6d4de442017-11-03 10:03:34 +01001393static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1394 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001395{
Petr Machata00635872017-10-16 16:26:37 +02001396 struct mlxsw_sp_ipip_entry *ipip_entry;
1397
1398 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001399 if (ipip_entry)
1400 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001401}
1402
Petr Machataa3fe1982017-11-03 10:03:33 +01001403static void
1404mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1405 struct mlxsw_sp_ipip_entry *ipip_entry)
1406{
1407 if (ipip_entry->decap_fib_entry)
1408 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1409}
1410
Petr Machata796ec772017-11-03 10:03:29 +01001411static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1412 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001413{
1414 struct mlxsw_sp_ipip_entry *ipip_entry;
1415
1416 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001417 if (ipip_entry)
1418 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001419}
1420
Petr Machata09dbf622017-11-28 13:17:14 +01001421static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1422 struct mlxsw_sp_rif *old_rif,
1423 struct mlxsw_sp_rif *new_rif);
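
/* Replace the loopback RIF backing the tunnel overlay. When @keep_encap is
 * set, nexthops that used the old loopback RIF are migrated to the new one,
 * so encapsulation through the tunnel keeps referencing a valid RIF.
 */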
Petr Machata65a61212017-11-03 10:03:37 +01001424static int
1425mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1426 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001427 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001428 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001429{
Petr Machata65a61212017-11-03 10:03:37 +01001430 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1431 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001432
Petr Machata65a61212017-11-03 10:03:37 +01001433 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1434 ipip_entry->ipipt,
1435 ipip_entry->ol_dev,
1436 extack);
1437 if (IS_ERR(new_lb_rif))
1438 return PTR_ERR(new_lb_rif);
1439 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001440
Petr Machata09dbf622017-11-28 13:17:14 +01001441 if (keep_encap)
1442 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1443 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001444
Petr Machata65a61212017-11-03 10:03:37 +01001445 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001446
Petr Machata65a61212017-11-03 10:03:37 +01001447 return 0;
1448}
1449
Petr Machata09dbf622017-11-28 13:17:14 +01001450static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1451 struct mlxsw_sp_rif *rif);
1452
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001453/**
1454 * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload related to an IPIP
1455 * entry. This always updates decap, and in addition to that it also:
1456 * @recreate_loopback: recreates the associated loopback RIF
1457 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1458 * relevant when recreate_loopback is true.
1459 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1460 * is only relevant when recreate_loopback is false.
1461 */
Petr Machata65a61212017-11-03 10:03:37 +01001462int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1463 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001464 bool recreate_loopback,
1465 bool keep_encap,
1466 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001467 struct netlink_ext_ack *extack)
1468{
1469 int err;
1470
1471	/* RIFs can't be edited, so to update the loopback RIF, we need to destroy and
1472 * recreate it. That creates a window of opportunity where RALUE and
1473 * RATR registers end up referencing a RIF that's already gone. RATRs
1474 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001475 * of RALUE, demote the decap route back.
1476 */
1477 if (ipip_entry->decap_fib_entry)
1478 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1479
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001480 if (recreate_loopback) {
1481 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1482 keep_encap, extack);
1483 if (err)
1484 return err;
1485 } else if (update_nexthops) {
1486 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1487 &ipip_entry->ol_lb->common);
1488 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001489
Petr Machata65a61212017-11-03 10:03:37 +01001490 if (ipip_entry->ol_dev->flags & IFF_UP)
1491 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001492
1493 return 0;
1494}
1495
Petr Machata65a61212017-11-03 10:03:37 +01001496static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1497 struct net_device *ol_dev,
1498 struct netlink_ext_ack *extack)
1499{
1500 struct mlxsw_sp_ipip_entry *ipip_entry =
1501 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001502 enum mlxsw_sp_l3proto ul_proto;
1503 union mlxsw_sp_l3addr saddr;
1504 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001505
1506 if (!ipip_entry)
1507 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001508
1509	/* For flat configuration cases, moving the overlay device to a different VRF
1510	 * might cause a local address conflict, and the conflicting tunnels need to be
1511 * demoted.
1512 */
1513 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1514 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1515 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1516 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1517 saddr, ul_tb_id,
1518 ipip_entry)) {
1519 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1520 return 0;
1521 }
1522
Petr Machata65a61212017-11-03 10:03:37 +01001523 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001524 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001525}
1526
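/* A VRF change on the underlay device changes the underlay table the tunnel is
 * bound to, so recreate the loopback RIF and migrate the nexthops that use the
 * tunnel to the new RIF (keep_encap).
 */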
Petr Machata61481f22017-11-03 10:03:41 +01001527static int
1528mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1529 struct mlxsw_sp_ipip_entry *ipip_entry,
1530 struct net_device *ul_dev,
1531 struct netlink_ext_ack *extack)
1532{
1533 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1534 true, true, false, extack);
1535}
1536
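/* An underlay device coming up only affects forwarding of encapsulated
 * packets, so refresh the nexthops that use the tunnel and keep the current
 * loopback RIF.
 */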
Petr Machata4cf04f32017-11-03 10:03:42 +01001537static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001538mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1539 struct mlxsw_sp_ipip_entry *ipip_entry,
1540 struct net_device *ul_dev)
1541{
1542 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1543 false, false, true, NULL);
1544}
1545
1546static int
1547mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1548 struct mlxsw_sp_ipip_entry *ipip_entry,
1549 struct net_device *ul_dev)
1550{
1551 /* A down underlay device causes encapsulated packets to not be
1552 * forwarded, but decap still works. So refresh next hops without
1553 * touching anything else.
1554 */
1555 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1556 false, false, true, NULL);
1557}
1558
1559static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001560mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1561 struct net_device *ol_dev,
1562 struct netlink_ext_ack *extack)
1563{
1564 const struct mlxsw_sp_ipip_ops *ipip_ops;
1565 struct mlxsw_sp_ipip_entry *ipip_entry;
1566 int err;
1567
1568 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1569 if (!ipip_entry)
1570 /* A change might make a tunnel eligible for offloading, but
1571 * that is currently not implemented. What falls to slow path
1572		 * that is currently not implemented. What falls to the slow path
1573 */
1574 return 0;
1575
1576 /* A change might make a tunnel not eligible for offloading. */
1577 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1578 ipip_entry->ipipt)) {
1579 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1580 return 0;
1581 }
1582
1583 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1584 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1585 return err;
1586}
1587
Petr Machataaf641712017-11-03 10:03:40 +01001588void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1589 struct mlxsw_sp_ipip_entry *ipip_entry)
1590{
1591 struct net_device *ol_dev = ipip_entry->ol_dev;
1592
1593 if (ol_dev->flags & IFF_UP)
1594 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1595 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1596}
1597
1598/* The configuration where several tunnels have the same local address in the
1599 * same underlay table needs special treatment in the HW. That is currently not
1600 * implemented in the driver. This function finds and demotes the first tunnel
1601 * with a given source address, except the one passed in via the argument
1602 * `except'.
1603 */
1604bool
1605mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1606 enum mlxsw_sp_l3proto ul_proto,
1607 union mlxsw_sp_l3addr saddr,
1608 u32 ul_tb_id,
1609 const struct mlxsw_sp_ipip_entry *except)
1610{
1611 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1612
1613 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1614 ipip_list_node) {
1615 if (ipip_entry != except &&
1616 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1617 ul_tb_id, ipip_entry)) {
1618 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1619 return true;
1620 }
1621 }
1622
1623 return false;
1624}
1625
Petr Machata61481f22017-11-03 10:03:41 +01001626static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1627 struct net_device *ul_dev)
1628{
1629 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1630
1631 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1632 ipip_list_node) {
1633 struct net_device *ipip_ul_dev =
1634 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1635
1636 if (ipip_ul_dev == ul_dev)
1637 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1638 }
1639}
1640
Petr Machata7e75af62017-11-03 10:03:36 +01001641int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1642 struct net_device *ol_dev,
1643 unsigned long event,
1644 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001645{
Petr Machata7e75af62017-11-03 10:03:36 +01001646 struct netdev_notifier_changeupper_info *chup;
1647 struct netlink_ext_ack *extack;
1648
Petr Machata00635872017-10-16 16:26:37 +02001649 switch (event) {
1650 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001651 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001652 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001653 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001654 return 0;
1655 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001656 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1657 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001658 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001659 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001660 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001661 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001662 chup = container_of(info, typeof(*chup), info);
1663 extack = info->extack;
1664 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001665 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001666 ol_dev,
1667 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001668 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001669 case NETDEV_CHANGE:
1670 extack = info->extack;
1671 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1672 ol_dev, extack);
Petr Machata00635872017-10-16 16:26:37 +02001673 }
1674 return 0;
1675}
1676
Petr Machata61481f22017-11-03 10:03:41 +01001677static int
1678__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1679 struct mlxsw_sp_ipip_entry *ipip_entry,
1680 struct net_device *ul_dev,
1681 unsigned long event,
1682 struct netdev_notifier_info *info)
1683{
1684 struct netdev_notifier_changeupper_info *chup;
1685 struct netlink_ext_ack *extack;
1686
1687 switch (event) {
1688 case NETDEV_CHANGEUPPER:
1689 chup = container_of(info, typeof(*chup), info);
1690 extack = info->extack;
1691 if (netif_is_l3_master(chup->upper_dev))
1692 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1693 ipip_entry,
1694 ul_dev,
1695 extack);
1696 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001697
1698 case NETDEV_UP:
1699 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1700 ul_dev);
1701 case NETDEV_DOWN:
1702 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1703 ipip_entry,
1704 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001705 }
1706 return 0;
1707}
1708
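/* Dispatch an event on an underlay device to every IPIP entry whose tunnel
 * uses that device. If updating one of the tunnels fails, demote all tunnels
 * on this underlay device so that no stale offload state is left behind.
 */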
1709int
1710mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1711 struct net_device *ul_dev,
1712 unsigned long event,
1713 struct netdev_notifier_info *info)
1714{
1715 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1716 int err;
1717
1718 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1719 ul_dev,
1720 ipip_entry))) {
1721 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1722 ul_dev, event, info);
1723 if (err) {
1724 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1725 ul_dev);
1726 return err;
1727 }
1728 }
1729
1730 return 0;
1731}
1732
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001733struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001734 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001735};
1736
1737struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001738 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001739 struct rhash_head ht_node;
1740 struct mlxsw_sp_neigh_key key;
1741 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001742 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001743 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001744 struct list_head nexthop_list; /* list of nexthops using
1745 * this neigh entry
1746 */
Yotam Gigib2157142016-07-05 11:27:51 +02001747 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001748 unsigned int counter_index;
1749 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001750};
1751
1752static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1753 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1754 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1755 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1756};
1757
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001758struct mlxsw_sp_neigh_entry *
1759mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1760 struct mlxsw_sp_neigh_entry *neigh_entry)
1761{
1762 if (!neigh_entry) {
1763 if (list_empty(&rif->neigh_list))
1764 return NULL;
1765 else
1766 return list_first_entry(&rif->neigh_list,
1767 typeof(*neigh_entry),
1768 rif_list_node);
1769 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001770 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001771 return NULL;
1772 return list_next_entry(neigh_entry, rif_list_node);
1773}
1774
1775int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1776{
1777 return neigh_entry->key.n->tbl->family;
1778}
1779
1780unsigned char *
1781mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1782{
1783 return neigh_entry->ha;
1784}
1785
1786u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1787{
1788 struct neighbour *n;
1789
1790 n = neigh_entry->key.n;
1791 return ntohl(*((__be32 *) n->primary_key));
1792}
1793
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001794struct in6_addr *
1795mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1796{
1797 struct neighbour *n;
1798
1799 n = neigh_entry->key.n;
1800 return (struct in6_addr *) &n->primary_key;
1801}
1802
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001803int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1804 struct mlxsw_sp_neigh_entry *neigh_entry,
1805 u64 *p_counter)
1806{
1807 if (!neigh_entry->counter_valid)
1808 return -EINVAL;
1809
1810 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1811 p_counter, NULL);
1812}
1813
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001814static struct mlxsw_sp_neigh_entry *
1815mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1816 u16 rif)
1817{
1818 struct mlxsw_sp_neigh_entry *neigh_entry;
1819
1820 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1821 if (!neigh_entry)
1822 return NULL;
1823
1824 neigh_entry->key.n = n;
1825 neigh_entry->rif = rif;
1826 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1827
1828 return neigh_entry;
1829}
1830
1831static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1832{
1833 kfree(neigh_entry);
1834}
1835
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001836static int
1837mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1838 struct mlxsw_sp_neigh_entry *neigh_entry)
1839{
Ido Schimmel9011b672017-05-16 19:38:25 +02001840 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001841 &neigh_entry->ht_node,
1842 mlxsw_sp_neigh_ht_params);
1843}
1844
1845static void
1846mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1847 struct mlxsw_sp_neigh_entry *neigh_entry)
1848{
Ido Schimmel9011b672017-05-16 19:38:25 +02001849 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001850 &neigh_entry->ht_node,
1851 mlxsw_sp_neigh_ht_params);
1852}
1853
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001854static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001855mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1856 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001857{
1858 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001859 const char *table_name;
1860
1861 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1862 case AF_INET:
1863 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1864 break;
1865 case AF_INET6:
1866 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1867 break;
1868 default:
1869 WARN_ON(1);
1870 return false;
1871 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001872
1873 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001874 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001875}
1876
1877static void
1878mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1879 struct mlxsw_sp_neigh_entry *neigh_entry)
1880{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001881 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001882 return;
1883
1884 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1885 return;
1886
1887 neigh_entry->counter_valid = true;
1888}
1889
1890static void
1891mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1892 struct mlxsw_sp_neigh_entry *neigh_entry)
1893{
1894 if (!neigh_entry->counter_valid)
1895 return;
1896 mlxsw_sp_flow_counter_free(mlxsw_sp,
1897 neigh_entry->counter_index);
1898 neigh_entry->counter_valid = false;
1899}
1900
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001901static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001902mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001903{
1904 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001905 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001906 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001907
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001908 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1909 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001910 return ERR_PTR(-EINVAL);
1911
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001912 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001913 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001914 return ERR_PTR(-ENOMEM);
1915
1916 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1917 if (err)
1918 goto err_neigh_entry_insert;
1919
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001920 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001921 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001922
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001923 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001924
1925err_neigh_entry_insert:
1926 mlxsw_sp_neigh_entry_free(neigh_entry);
1927 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001928}
1929
1930static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001931mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1932 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001933{
Ido Schimmel9665b742017-02-08 11:16:42 +01001934 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001935 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001936 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1937 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001938}
1939
1940static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001941mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001942{
Jiri Pirko33b13412016-11-10 12:31:04 +01001943 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001944
Jiri Pirko33b13412016-11-10 12:31:04 +01001945 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001946 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001947 &key, mlxsw_sp_neigh_ht_params);
1948}
1949
Yotam Gigic723c7352016-07-05 11:27:43 +02001950static void
1951mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1952{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001953 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001954
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001955#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001956 interval = min_t(unsigned long,
1957 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1958 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001959#else
1960 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1961#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001962 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001963}
1964
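/* Process one IPv4 entry reported by a RAUHTD dump: look up the matching
 * kernel neighbour and send it an event to refresh it, so that activity seen
 * by the device is reflected in the kernel.
 */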
1965static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1966 char *rauhtd_pl,
1967 int ent_index)
1968{
1969 struct net_device *dev;
1970 struct neighbour *n;
1971 __be32 dipn;
1972 u32 dip;
1973 u16 rif;
1974
1975 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1976
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001977 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001978 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1979 return;
1980 }
1981
1982 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001983 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001984 n = neigh_lookup(&arp_tbl, &dipn, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01001985 if (!n)
Yotam Gigic723c7352016-07-05 11:27:43 +02001986 return;
Yotam Gigic723c7352016-07-05 11:27:43 +02001987
1988 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1989 neigh_event_send(n, NULL);
1990 neigh_release(n);
1991}
1992
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001993#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001994static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1995 char *rauhtd_pl,
1996 int rec_index)
1997{
1998 struct net_device *dev;
1999 struct neighbour *n;
2000 struct in6_addr dip;
2001 u16 rif;
2002
2003 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2004 (char *) &dip);
2005
2006 if (!mlxsw_sp->router->rifs[rif]) {
2007 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2008 return;
2009 }
2010
2011 dev = mlxsw_sp->router->rifs[rif]->dev;
2012 n = neigh_lookup(&nd_tbl, &dip, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002013 if (!n)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002014 return;
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002015
2016 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2017 neigh_event_send(n, NULL);
2018 neigh_release(n);
2019}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002020#else
2021static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2022 char *rauhtd_pl,
2023 int rec_index)
2024{
2025}
2026#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002027
Yotam Gigic723c7352016-07-05 11:27:43 +02002028static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2029 char *rauhtd_pl,
2030 int rec_index)
2031{
2032 u8 num_entries;
2033 int i;
2034
2035 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2036 rec_index);
2037 /* Hardware starts counting at 0, so add 1. */
2038 num_entries++;
2039
2040 /* Each record consists of several neighbour entries. */
2041 for (i = 0; i < num_entries; i++) {
2042 int ent_index;
2043
2044 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2045 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2046 ent_index);
2047 }
2048
2049}
2050
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002051static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2052 char *rauhtd_pl,
2053 int rec_index)
2054{
2055 /* One record contains one entry. */
2056 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2057 rec_index);
2058}
2059
Yotam Gigic723c7352016-07-05 11:27:43 +02002060static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2061 char *rauhtd_pl, int rec_index)
2062{
2063 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2064 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2065 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2066 rec_index);
2067 break;
2068 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002069 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2070 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002071 break;
2072 }
2073}
2074
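/* The dump is full - and another query is needed - only if the maximum number
 * of records was returned and the last record cannot hold more entries: an
 * IPv6 record always holds a single entry, and an IPv4 record is full when all
 * of its entry slots are used.
 */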
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002075static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2076{
2077 u8 num_rec, last_rec_index, num_entries;
2078
2079 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2080 last_rec_index = num_rec - 1;
2081
2082 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2083 return false;
2084 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2085 MLXSW_REG_RAUHTD_TYPE_IPV6)
2086 return true;
2087
2088 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2089 last_rec_index);
2090 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2091 return true;
2092 return false;
2093}
2094
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002095static int
2096__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2097 char *rauhtd_pl,
2098 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002099{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002100 int i, num_rec;
2101 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002102
2103 /* Make sure the neighbour's netdev isn't removed in the
2104 * process.
2105 */
2106 rtnl_lock();
2107 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002108 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002109 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2110 rauhtd_pl);
2111 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002112 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002113 break;
2114 }
2115 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2116 for (i = 0; i < num_rec; i++)
2117 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2118 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002119 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002120 rtnl_unlock();
2121
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002122 return err;
2123}
2124
2125static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2126{
2127 enum mlxsw_reg_rauhtd_type type;
2128 char *rauhtd_pl;
2129 int err;
2130
2131 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2132 if (!rauhtd_pl)
2133 return -ENOMEM;
2134
2135 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2136 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2137 if (err)
2138 goto out;
2139
2140 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2141 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2142out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002143 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002144 return err;
2145}
2146
2147static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2148{
2149 struct mlxsw_sp_neigh_entry *neigh_entry;
2150
2151	/* Take the RTNL mutex here to prevent the lists from changing */
2152 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002153 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002154 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002155		/* If this neigh has nexthops, make the kernel think this neigh
2156 * is active regardless of the traffic.
2157 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002158 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002159 rtnl_unlock();
2160}
2161
2162static void
2163mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2164{
Ido Schimmel9011b672017-05-16 19:38:25 +02002165 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002166
Ido Schimmel9011b672017-05-16 19:38:25 +02002167 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002168 msecs_to_jiffies(interval));
2169}
2170
2171static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2172{
Ido Schimmel9011b672017-05-16 19:38:25 +02002173 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002174 int err;
2175
Ido Schimmel9011b672017-05-16 19:38:25 +02002176 router = container_of(work, struct mlxsw_sp_router,
2177 neighs_update.dw.work);
2178 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002179 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002180 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002181
Ido Schimmel9011b672017-05-16 19:38:25 +02002182 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002183
Ido Schimmel9011b672017-05-16 19:38:25 +02002184 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002185}
2186
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002187static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2188{
2189 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002190 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002191
Ido Schimmel9011b672017-05-16 19:38:25 +02002192 router = container_of(work, struct mlxsw_sp_router,
2193 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002194	/* Iterate over the nexthop neighbours, find the unresolved ones and
2195	 * send ARP on them. This solves the chicken-and-egg problem where a
2196	 * nexthop would not get offloaded until its neighbour is resolved,
2197	 * but the neighbour would never get resolved as long as traffic is
2198	 * flowing in HW via a different nexthop.
2199	 *
2200	 * Take the RTNL mutex here to prevent the lists from changing.
2201 */
2202 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002203 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002204 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002205 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002206 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002207 rtnl_unlock();
2208
Ido Schimmel9011b672017-05-16 19:38:25 +02002209 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002210 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2211}
2212
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002213static void
2214mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2215 struct mlxsw_sp_neigh_entry *neigh_entry,
2216 bool removing);
2217
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002218static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002219{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002220 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2221 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2222}
2223
2224static void
2225mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2226 struct mlxsw_sp_neigh_entry *neigh_entry,
2227 enum mlxsw_reg_rauht_op op)
2228{
Jiri Pirko33b13412016-11-10 12:31:04 +01002229 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002230 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002231 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002232
2233 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2234 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002235 if (neigh_entry->counter_valid)
2236 mlxsw_reg_rauht_pack_counter(rauht_pl,
2237 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002238 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2239}
2240
2241static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002242mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2243 struct mlxsw_sp_neigh_entry *neigh_entry,
2244 enum mlxsw_reg_rauht_op op)
2245{
2246 struct neighbour *n = neigh_entry->key.n;
2247 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2248 const char *dip = n->primary_key;
2249
2250 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2251 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002252 if (neigh_entry->counter_valid)
2253 mlxsw_reg_rauht_pack_counter(rauht_pl,
2254 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002255 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2256}
2257
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002258bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002259{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002260 struct neighbour *n = neigh_entry->key.n;
2261
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002262 /* Packets with a link-local destination address are trapped
2263 * after LPM lookup and never reach the neighbour table, so
2264 * there is no need to program such neighbours to the device.
2265 */
2266 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2267 IPV6_ADDR_LINKLOCAL)
2268 return true;
2269 return false;
2270}
2271
2272static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002273mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2274 struct mlxsw_sp_neigh_entry *neigh_entry,
2275 bool adding)
2276{
2277 if (!adding && !neigh_entry->connected)
2278 return;
2279 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002280 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002281 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2282 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002283 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002284 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002285 return;
2286 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2287 mlxsw_sp_rauht_op(adding));
2288 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002289 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002290 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002291}
2292
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002293void
2294mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2295 struct mlxsw_sp_neigh_entry *neigh_entry,
2296 bool adding)
2297{
2298 if (adding)
2299 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2300 else
2301 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2302 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2303}
2304
Ido Schimmelceb88812017-11-02 17:14:07 +01002305struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002306 struct work_struct work;
2307 struct mlxsw_sp *mlxsw_sp;
2308 struct neighbour *n;
2309};
2310
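/* Deferred handler for NETEVENT_NEIGH_UPDATE: snapshot the neighbour state
 * under its lock, then under RTNL create or update the corresponding hardware
 * entry, and destroy it once it is disconnected and no nexthop uses it.
 */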
2311static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2312{
Ido Schimmelceb88812017-11-02 17:14:07 +01002313 struct mlxsw_sp_netevent_work *net_work =
2314 container_of(work, struct mlxsw_sp_netevent_work, work);
2315 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002316 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002317 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002318 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002319 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002320 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002321
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002322 /* If these parameters are changed after we release the lock,
2323 * then we are guaranteed to receive another event letting us
2324 * know about it.
2325 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002326 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002327 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002328 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002329 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002330 read_unlock_bh(&n->lock);
2331
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002332 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002333 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002334 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2335 if (!entry_connected && !neigh_entry)
2336 goto out;
2337 if (!neigh_entry) {
2338 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2339 if (IS_ERR(neigh_entry))
2340 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002341 }
2342
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002343 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2344 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2345 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2346
2347 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2348 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2349
2350out:
2351 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002352 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002353 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002354}
2355
Ido Schimmel28678f02017-11-02 17:14:10 +01002356static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2357
2358static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2359{
2360 struct mlxsw_sp_netevent_work *net_work =
2361 container_of(work, struct mlxsw_sp_netevent_work, work);
2362 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2363
2364 mlxsw_sp_mp_hash_init(mlxsw_sp);
2365 kfree(net_work);
2366}
2367
2368static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002369 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002370{
Ido Schimmelceb88812017-11-02 17:14:07 +01002371 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002372 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002373 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002374 struct mlxsw_sp *mlxsw_sp;
2375 unsigned long interval;
2376 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002377 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002378 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002379
2380 switch (event) {
2381 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2382 p = ptr;
2383
2384 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002385 if (!p->dev || (p->tbl->family != AF_INET &&
2386 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002387 return NOTIFY_DONE;
2388
2389 /* We are in atomic context and can't take RTNL mutex,
2390 * so use RCU variant to walk the device chain.
2391 */
2392 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2393 if (!mlxsw_sp_port)
2394 return NOTIFY_DONE;
2395
2396 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2397 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002398 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002399
2400 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2401 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002402 case NETEVENT_NEIGH_UPDATE:
2403 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002404
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002405 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002406 return NOTIFY_DONE;
2407
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002408 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002409 if (!mlxsw_sp_port)
2410 return NOTIFY_DONE;
2411
Ido Schimmelceb88812017-11-02 17:14:07 +01002412 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2413 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002414 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002415 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002416 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002417
Ido Schimmelceb88812017-11-02 17:14:07 +01002418 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2419 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2420 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002421
2422 /* Take a reference to ensure the neighbour won't be
2423		 * destroyed until we drop the reference in the delayed
2424 * work.
2425 */
2426 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002427 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002428 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002429 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002430 case NETEVENT_MULTIPATH_HASH_UPDATE:
2431 net = ptr;
2432
2433 if (!net_eq(net, &init_net))
2434 return NOTIFY_DONE;
2435
2436 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2437 if (!net_work)
2438 return NOTIFY_BAD;
2439
2440 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2441 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2442 net_work->mlxsw_sp = router->mlxsw_sp;
2443 mlxsw_core_schedule_work(&net_work->work);
2444 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002445 }
2446
2447 return NOTIFY_DONE;
2448}
2449
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002450static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2451{
Yotam Gigic723c7352016-07-05 11:27:43 +02002452 int err;
2453
Ido Schimmel9011b672017-05-16 19:38:25 +02002454 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002455 &mlxsw_sp_neigh_ht_params);
2456 if (err)
2457 return err;
2458
2459 /* Initialize the polling interval according to the default
2460 * table.
2461 */
2462 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2463
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002464	/* Create the delayed works for the activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002465 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002466 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002467 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002468 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002469 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2470 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002471 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002472}
2473
2474static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2475{
Ido Schimmel9011b672017-05-16 19:38:25 +02002476 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2477 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2478 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002479}
2480
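/* Called when a RIF goes away: remove all of its neighbour entries from the
 * device and destroy the corresponding driver state.
 */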
Ido Schimmel9665b742017-02-08 11:16:42 +01002481static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002482 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002483{
2484 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2485
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002486 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002487 rif_list_node) {
2488 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002489 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002490 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002491}
2492
Petr Machata35225e42017-09-02 23:49:22 +02002493enum mlxsw_sp_nexthop_type {
2494 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002495 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002496};
2497
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002498struct mlxsw_sp_nexthop_key {
2499 struct fib_nh *fib_nh;
2500};
2501
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002502struct mlxsw_sp_nexthop {
2503 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002504 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002505 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002506 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2507 * this belongs to
2508 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002509 struct rhash_head ht_node;
2510 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002511 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002512 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002513 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002514 int norm_nh_weight;
2515 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002516 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002517 u8 should_offload:1, /* set indicates this neigh is connected and
2518			      * should be put into the KVD linear area of this group.
2519 */
2520 offloaded:1, /* set in case the neigh is actually put into
2521			 * the KVD linear area of this group.
2522 */
2523	   update:1; /* set indicates that the MAC of this neigh should be
2524 * updated in HW
2525 */
Petr Machata35225e42017-09-02 23:49:22 +02002526 enum mlxsw_sp_nexthop_type type;
2527 union {
2528 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002529 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002530 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002531 unsigned int counter_index;
2532 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002533};
2534
2535struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002536 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002537 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002538 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002539 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002540 u8 adj_index_valid:1,
2541 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002542 u32 adj_index;
2543 u16 ecmp_size;
2544 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002545 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002546 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002547#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002548};
2549
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002550void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2551 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002552{
2553 struct devlink *devlink;
2554
2555 devlink = priv_to_devlink(mlxsw_sp->core);
2556 if (!devlink_dpipe_table_counter_enabled(devlink,
2557 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2558 return;
2559
2560 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2561 return;
2562
2563 nh->counter_valid = true;
2564}
2565
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002566void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2567 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002568{
2569 if (!nh->counter_valid)
2570 return;
2571 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2572 nh->counter_valid = false;
2573}
2574
2575int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2576 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2577{
2578 if (!nh->counter_valid)
2579 return -EINVAL;
2580
2581 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2582 p_counter, NULL);
2583}
2584
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002585struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2586 struct mlxsw_sp_nexthop *nh)
2587{
2588 if (!nh) {
2589 if (list_empty(&router->nexthop_list))
2590 return NULL;
2591 else
2592 return list_first_entry(&router->nexthop_list,
2593 typeof(*nh), router_list_node);
2594 }
2595 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2596 return NULL;
2597 return list_next_entry(nh, router_list_node);
2598}
2599
2600bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2601{
2602 return nh->offloaded;
2603}
2604
2605unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2606{
2607 if (!nh->offloaded)
2608 return NULL;
2609 return nh->neigh_entry->ha;
2610}
2611
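/* Report where in the adjacency table the nexthop is programmed: the group's
 * base adjacency index and size, and the offset of this nexthop within the
 * group, computed by summing the adjacency entries of the offloaded nexthops
 * that precede it.
 */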
2612int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002613 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002614{
2615 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2616 u32 adj_hash_index = 0;
2617 int i;
2618
2619 if (!nh->offloaded || !nh_grp->adj_index_valid)
2620 return -EINVAL;
2621
2622 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002623 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002624
2625 for (i = 0; i < nh_grp->count; i++) {
2626 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2627
2628 if (nh_iter == nh)
2629 break;
2630 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002631 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002632 }
2633
2634 *p_adj_hash_index = adj_hash_index;
2635 return 0;
2636}
2637
2638struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2639{
2640 return nh->rif;
2641}
2642
2643bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2644{
2645 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2646 int i;
2647
2648 for (i = 0; i < nh_grp->count; i++) {
2649 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2650
2651 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2652 return true;
2653 }
2654 return false;
2655}
2656
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002657static struct fib_info *
2658mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2659{
2660 return nh_grp->priv;
2661}
2662
2663struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002664 enum mlxsw_sp_l3proto proto;
2665 union {
2666 struct fib_info *fi;
2667 struct mlxsw_sp_fib6_entry *fib6_entry;
2668 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002669};
2670
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002671static bool
2672mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002673 const struct in6_addr *gw, int ifindex,
2674 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002675{
2676 int i;
2677
2678 for (i = 0; i < nh_grp->count; i++) {
2679 const struct mlxsw_sp_nexthop *nh;
2680
2681 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002682 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002683 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2684 return true;
2685 }
2686
2687 return false;
2688}
2689
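/* A nexthop group matches an IPv6 FIB entry if both have the same number of
 * nexthops and every rt6 in the entry has a corresponding nexthop in the group
 * with the same gateway, ifindex and weight.
 */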
2690static bool
2691mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2692 const struct mlxsw_sp_fib6_entry *fib6_entry)
2693{
2694 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2695
2696 if (nh_grp->count != fib6_entry->nrt6)
2697 return false;
2698
2699 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2700 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002701 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002702
2703 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
Ido Schimmel3743d882018-01-12 17:15:59 +01002704 weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002705 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
Ido Schimmel3743d882018-01-12 17:15:59 +01002706 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2707 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002708 return false;
2709 }
2710
2711 return true;
2712}
2713
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002714static int
2715mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2716{
2717 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2718 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2719
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002720 switch (cmp_arg->proto) {
2721 case MLXSW_SP_L3_PROTO_IPV4:
2722 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2723 case MLXSW_SP_L3_PROTO_IPV6:
2724 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2725 cmp_arg->fib6_entry);
2726 default:
2727 WARN_ON(1);
2728 return 1;
2729 }
2730}
2731
2732static int
2733mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2734{
2735 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002736}
2737
2738static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2739{
2740 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002741 const struct mlxsw_sp_nexthop *nh;
2742 struct fib_info *fi;
2743 unsigned int val;
2744 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002745
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002746 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2747 case AF_INET:
2748 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2749 return jhash(&fi, sizeof(fi), seed);
2750 case AF_INET6:
2751 val = nh_grp->count;
2752 for (i = 0; i < nh_grp->count; i++) {
2753 nh = &nh_grp->nexthops[i];
2754 val ^= nh->ifindex;
2755 }
2756 return jhash(&val, sizeof(val), seed);
2757 default:
2758 WARN_ON(1);
2759 return 0;
2760 }
2761}
2762
2763static u32
2764mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2765{
2766 unsigned int val = fib6_entry->nrt6;
2767 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2768 struct net_device *dev;
2769
2770 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2771 dev = mlxsw_sp_rt6->rt->dst.dev;
2772 val ^= dev->ifindex;
2773 }
2774
2775 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002776}
2777
2778static u32
2779mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2780{
2781 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2782
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002783 switch (cmp_arg->proto) {
2784 case MLXSW_SP_L3_PROTO_IPV4:
2785 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2786 case MLXSW_SP_L3_PROTO_IPV6:
2787 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2788 default:
2789 WARN_ON(1);
2790 return 0;
2791 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002792}
2793
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002794static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002795 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002796 .hashfn = mlxsw_sp_nexthop_group_hash,
2797 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2798 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002799};
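/* Note on the hashtable parameters above: lookups are keyed by a
 * struct mlxsw_sp_nexthop_group_cmp_arg rather than by a field of the
 * stored group, so .hashfn hashes the lookup key, .obj_hashfn hashes the
 * stored object and .obj_cmpfn must return 0 when the two match. The two
 * hash functions have to agree for matching pairs - e.g. for IPv4 both
 * end up doing jhash() over the same struct fib_info pointer.
 */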
2800
2801static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2802 struct mlxsw_sp_nexthop_group *nh_grp)
2803{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002804 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2805 !nh_grp->gateway)
2806 return 0;
2807
Ido Schimmel9011b672017-05-16 19:38:25 +02002808 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002809 &nh_grp->ht_node,
2810 mlxsw_sp_nexthop_group_ht_params);
2811}
2812
2813static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2814 struct mlxsw_sp_nexthop_group *nh_grp)
2815{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002816 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2817 !nh_grp->gateway)
2818 return;
2819
Ido Schimmel9011b672017-05-16 19:38:25 +02002820 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002821 &nh_grp->ht_node,
2822 mlxsw_sp_nexthop_group_ht_params);
2823}
2824
2825static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002826mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2827 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002828{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002829 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2830
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002831 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002832 cmp_arg.fi = fi;
2833 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2834 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002835 mlxsw_sp_nexthop_group_ht_params);
2836}
2837
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002838static struct mlxsw_sp_nexthop_group *
2839mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2840 struct mlxsw_sp_fib6_entry *fib6_entry)
2841{
2842 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2843
2844 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2845 cmp_arg.fib6_entry = fib6_entry;
2846 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2847 &cmp_arg,
2848 mlxsw_sp_nexthop_group_ht_params);
2849}
2850
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002851static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2852 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2853 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2854 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2855};
2856
2857static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2858 struct mlxsw_sp_nexthop *nh)
2859{
Ido Schimmel9011b672017-05-16 19:38:25 +02002860 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002861 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2862}
2863
2864static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2865 struct mlxsw_sp_nexthop *nh)
2866{
Ido Schimmel9011b672017-05-16 19:38:25 +02002867 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002868 mlxsw_sp_nexthop_ht_params);
2869}
2870
Ido Schimmelad178c82017-02-08 11:16:40 +01002871static struct mlxsw_sp_nexthop *
2872mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2873 struct mlxsw_sp_nexthop_key key)
2874{
Ido Schimmel9011b672017-05-16 19:38:25 +02002875 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002876 mlxsw_sp_nexthop_ht_params);
2877}
2878
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002879static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002880 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002881 u32 adj_index, u16 ecmp_size,
2882 u32 new_adj_index,
2883 u16 new_ecmp_size)
2884{
2885 char raleu_pl[MLXSW_REG_RALEU_LEN];
2886
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002887 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002888 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2889 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002890 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002891 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2892}
2893
2894static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2895 struct mlxsw_sp_nexthop_group *nh_grp,
2896 u32 old_adj_index, u16 old_ecmp_size)
2897{
2898 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002899 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002900 int err;
2901
2902 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002903 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002904 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002905 fib = fib_entry->fib_node->fib;
2906 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002907 old_adj_index,
2908 old_ecmp_size,
2909 nh_grp->adj_index,
2910 nh_grp->ecmp_size);
2911 if (err)
2912 return err;
2913 }
2914 return 0;
2915}
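/* The RALEU register rewrites, within a single virtual router and
 * protocol, every FIB entry that currently points at the old adjacency
 * block (old_adj_index/old_ecmp_size) so that it points at the group's
 * new block instead. An update is issued whenever the FIB changes along
 * the group's fib_list; a repeated update for the same FIB is harmless,
 * since nothing points at the old block after the first one.
 */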
2916
Ido Schimmeleb789982017-10-22 23:11:48 +02002917static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2918 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002919{
2920 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2921 char ratr_pl[MLXSW_REG_RATR_LEN];
2922
2923 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002924 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2925 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002926 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002927 if (nh->counter_valid)
2928 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2929 else
2930 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2931
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002932 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2933}
2934
Ido Schimmeleb789982017-10-22 23:11:48 +02002935int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2936 struct mlxsw_sp_nexthop *nh)
2937{
2938 int i;
2939
2940 for (i = 0; i < nh->num_adj_entries; i++) {
2941 int err;
2942
2943 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2944 if (err)
2945 return err;
2946 }
2947
2948 return 0;
2949}
2950
2951static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2952 u32 adj_index,
2953 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002954{
2955 const struct mlxsw_sp_ipip_ops *ipip_ops;
2956
2957 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2958 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2959}
2960
Ido Schimmeleb789982017-10-22 23:11:48 +02002961static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2962 u32 adj_index,
2963 struct mlxsw_sp_nexthop *nh)
2964{
2965 int i;
2966
2967 for (i = 0; i < nh->num_adj_entries; i++) {
2968 int err;
2969
2970 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2971 nh);
2972 if (err)
2973 return err;
2974 }
2975
2976 return 0;
2977}
2978
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002979static int
Petr Machata35225e42017-09-02 23:49:22 +02002980mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2981 struct mlxsw_sp_nexthop_group *nh_grp,
2982 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002983{
2984 u32 adj_index = nh_grp->adj_index; /* base */
2985 struct mlxsw_sp_nexthop *nh;
2986 int i;
2987 int err;
2988
2989 for (i = 0; i < nh_grp->count; i++) {
2990 nh = &nh_grp->nexthops[i];
2991
2992 if (!nh->should_offload) {
2993 nh->offloaded = 0;
2994 continue;
2995 }
2996
Ido Schimmela59b7e02017-01-23 11:11:42 +01002997 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002998 switch (nh->type) {
2999 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003000 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003001 (mlxsw_sp, adj_index, nh);
3002 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003003 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3004 err = mlxsw_sp_nexthop_ipip_update
3005 (mlxsw_sp, adj_index, nh);
3006 break;
Petr Machata35225e42017-09-02 23:49:22 +02003007 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003008 if (err)
3009 return err;
3010 nh->update = 0;
3011 nh->offloaded = 1;
3012 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003013 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003014 }
3015 return 0;
3016}
3017
Ido Schimmel1819ae32017-07-21 18:04:28 +02003018static bool
3019mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3020 const struct mlxsw_sp_fib_entry *fib_entry);
3021
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003022static int
3023mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3024 struct mlxsw_sp_nexthop_group *nh_grp)
3025{
3026 struct mlxsw_sp_fib_entry *fib_entry;
3027 int err;
3028
3029 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02003030 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3031 fib_entry))
3032 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003033 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3034 if (err)
3035 return err;
3036 }
3037 return 0;
3038}
3039
3040static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003041mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3042 enum mlxsw_reg_ralue_op op, int err);
3043
3044static void
3045mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3046{
3047 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3048 struct mlxsw_sp_fib_entry *fib_entry;
3049
3050 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3051 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3052 fib_entry))
3053 continue;
3054 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3055 }
3056}
3057
Ido Schimmel425a08c2017-10-22 23:11:47 +02003058static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3059{
3060 /* Valid sizes for an adjacency group are:
3061 * 1-64, 512, 1024, 2048 and 4096.
3062 */
3063 if (*p_adj_grp_size <= 64)
3064 return;
3065 else if (*p_adj_grp_size <= 512)
3066 *p_adj_grp_size = 512;
3067 else if (*p_adj_grp_size <= 1024)
3068 *p_adj_grp_size = 1024;
3069 else if (*p_adj_grp_size <= 2048)
3070 *p_adj_grp_size = 2048;
3071 else
3072 *p_adj_grp_size = 4096;
3073}
3074
3075static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3076 unsigned int alloc_size)
3077{
3078 if (alloc_size >= 4096)
3079 *p_adj_grp_size = 4096;
3080 else if (alloc_size >= 2048)
3081 *p_adj_grp_size = 2048;
3082 else if (alloc_size >= 1024)
3083 *p_adj_grp_size = 1024;
3084 else if (alloc_size >= 512)
3085 *p_adj_grp_size = 512;
3086}
3087
3088static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3089 u16 *p_adj_grp_size)
3090{
3091 unsigned int alloc_size;
3092 int err;
3093
3094 /* Round up the requested group size to the next size supported
3095 * by the device and make sure the request can be satisfied.
3096 */
3097 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3098 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3099 &alloc_size);
3100 if (err)
3101 return err;
3102 /* It is possible the allocation results in more allocated
3103 * entries than requested. Try to use as much of them as
3104 * possible.
3105 */
3106 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3107
3108 return 0;
3109}
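/* For example (hypothetical numbers): a requested size of 6 is already a
 * valid group size (1-64) and is left as is, while a request for 100 is
 * rounded up to 512 and a request for 1500 to 2048. If the KVD linear
 * allocator reports that satisfying the request would actually yield a
 * larger region (e.g. 512 requested, 1024 granted), the group size is
 * bumped to the largest valid size that still fits in the granted region,
 * so the extra entries are not wasted.
 */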
3110
Ido Schimmel77d964e2017-08-02 09:56:05 +02003111static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003112mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3113{
3114 int i, g = 0, sum_norm_weight = 0;
3115 struct mlxsw_sp_nexthop *nh;
3116
3117 for (i = 0; i < nh_grp->count; i++) {
3118 nh = &nh_grp->nexthops[i];
3119
3120 if (!nh->should_offload)
3121 continue;
3122 if (g > 0)
3123 g = gcd(nh->nh_weight, g);
3124 else
3125 g = nh->nh_weight;
3126 }
3127
3128 for (i = 0; i < nh_grp->count; i++) {
3129 nh = &nh_grp->nexthops[i];
3130
3131 if (!nh->should_offload)
3132 continue;
3133 nh->norm_nh_weight = nh->nh_weight / g;
3134 sum_norm_weight += nh->norm_nh_weight;
3135 }
3136
3137 nh_grp->sum_norm_weight = sum_norm_weight;
3138}
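/* Worked example (hypothetical weights): a group with two offloaded
 * nexthops of weights 3 and 6 has gcd 3, so the normalized weights become
 * 1 and 2 and sum_norm_weight is 3. Nexthops that cannot be offloaded are
 * ignored, so a group whose members are all unresolved ends up with a
 * sum_norm_weight of 0, which the caller treats as "trap to CPU".
 */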
3139
3140static void
3141mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3142{
3143 int total = nh_grp->sum_norm_weight;
3144 u16 ecmp_size = nh_grp->ecmp_size;
3145 int i, weight = 0, lower_bound = 0;
3146
3147 for (i = 0; i < nh_grp->count; i++) {
3148 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3149 int upper_bound;
3150
3151 if (!nh->should_offload)
3152 continue;
3153 weight += nh->norm_nh_weight;
3154 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3155 nh->num_adj_entries = upper_bound - lower_bound;
3156 lower_bound = upper_bound;
3157 }
3158}
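/* Continuing the example above: with normalized weights 1 and 2 and an
 * ecmp_size of 3, the running upper bounds are DIV_ROUND_CLOSEST(3 * 1, 3)
 * = 1 and DIV_ROUND_CLOSEST(3 * 3, 3) = 3, so the first nexthop owns one
 * adjacency entry and the second owns two - the 1:2 ratio is preserved
 * exactly. When ecmp_size is not a multiple of the weight sum, the
 * rounding spreads the remainder as evenly as possible.
 */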
3159
3160static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003161mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3162 struct mlxsw_sp_nexthop_group *nh_grp)
3163{
Ido Schimmeleb789982017-10-22 23:11:48 +02003164 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003165 struct mlxsw_sp_nexthop *nh;
3166 bool offload_change = false;
3167 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003168 bool old_adj_index_valid;
3169 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003170 int i;
3171 int err;
3172
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003173 if (!nh_grp->gateway) {
3174 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3175 return;
3176 }
3177
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003178 for (i = 0; i < nh_grp->count; i++) {
3179 nh = &nh_grp->nexthops[i];
3180
Petr Machata56b8a9e2017-07-31 09:27:29 +02003181 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003182 offload_change = true;
3183 if (nh->should_offload)
3184 nh->update = 1;
3185 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003186 }
3187 if (!offload_change) {
3188 /* Nothing was added or removed, so no need to reallocate. Just
3189 * update MAC on existing adjacency indexes.
3190 */
Petr Machata35225e42017-09-02 23:49:22 +02003191 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003192 if (err) {
3193 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3194 goto set_trap;
3195 }
3196 return;
3197 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003198 mlxsw_sp_nexthop_group_normalize(nh_grp);
3199 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003200 /* No neigh of this group is connected so we just set
3201		 * the trap and let everything flow through kernel.
3202 */
3203 goto set_trap;
3204
Ido Schimmeleb789982017-10-22 23:11:48 +02003205 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003206 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3207 if (err)
3208 /* No valid allocation size available. */
3209 goto set_trap;
3210
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003211 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3212 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003213 /* We ran out of KVD linear space, just set the
3214 * trap and let everything flow through kernel.
3215 */
3216 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3217 goto set_trap;
3218 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003219 old_adj_index_valid = nh_grp->adj_index_valid;
3220 old_adj_index = nh_grp->adj_index;
3221 old_ecmp_size = nh_grp->ecmp_size;
3222 nh_grp->adj_index_valid = 1;
3223 nh_grp->adj_index = adj_index;
3224 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003225 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003226 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003227 if (err) {
3228 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3229 goto set_trap;
3230 }
3231
3232 if (!old_adj_index_valid) {
3233 /* The trap was set for fib entries, so we have to call
3234 * fib entry update to unset it and use adjacency index.
3235 */
3236 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3237 if (err) {
3238 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3239 goto set_trap;
3240 }
3241 return;
3242 }
3243
3244 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3245 old_adj_index, old_ecmp_size);
3246 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3247 if (err) {
3248 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3249 goto set_trap;
3250 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003251
3252 /* Offload state within the group changed, so update the flags. */
3253 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3254
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003255 return;
3256
3257set_trap:
3258 old_adj_index_valid = nh_grp->adj_index_valid;
3259 nh_grp->adj_index_valid = 0;
3260 for (i = 0; i < nh_grp->count; i++) {
3261 nh = &nh_grp->nexthops[i];
3262 nh->offloaded = 0;
3263 }
3264 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3265 if (err)
3266 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3267 if (old_adj_index_valid)
3268 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3269}
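/* Summary of the refresh flow above: groups without a gateway only need
 * their FIB entries re-written. If the set of offloadable nexthops did not
 * change, the existing adjacency entries are simply re-written in place.
 * Otherwise the group is re-sized: weights are normalized, a new KVD
 * linear block of the fixed-up ecmp_size is allocated and populated,
 * routes are switched over (via a FIB entry update when the group was
 * previously trapped, or via a RALEU mass-update otherwise) and the old
 * block is freed. Any failure falls back to set_trap, which points all of
 * the group's routes back at the CPU.
 */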
3270
3271static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3272 bool removing)
3273{
Petr Machata213666a2017-07-31 09:27:30 +02003274 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003275 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003276 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003277 nh->should_offload = 0;
3278 nh->update = 1;
3279}
3280
3281static void
3282mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3283 struct mlxsw_sp_neigh_entry *neigh_entry,
3284 bool removing)
3285{
3286 struct mlxsw_sp_nexthop *nh;
3287
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003288 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3289 neigh_list_node) {
3290 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3291 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3292 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003293}
3294
Ido Schimmel9665b742017-02-08 11:16:42 +01003295static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003296 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003297{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003298 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003299 return;
3300
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003301 nh->rif = rif;
3302 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003303}
3304
3305static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3306{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003307 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003308 return;
3309
3310 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003311 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003312}
3313
Ido Schimmela8c97012017-02-08 11:16:35 +01003314static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3315 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003316{
3317 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003318 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003319 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003320 int err;
3321
Ido Schimmelad178c82017-02-08 11:16:40 +01003322 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003323 return 0;
3324
Jiri Pirko33b13412016-11-10 12:31:04 +01003325	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003326	 * not destroyed before the nexthop entry is finished with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003327 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003328 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003329 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003330 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003331 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003332 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3333 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003334 if (IS_ERR(n))
3335 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003336 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003337 }
3338 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3339 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003340 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3341 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003342 err = -EINVAL;
3343 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003344 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003345 }
Yotam Gigib2157142016-07-05 11:27:51 +02003346
3347 /* If that is the first nexthop connected to that neigh, add to
3348 * nexthop_neighs_list
3349 */
3350 if (list_empty(&neigh_entry->nexthop_list))
3351 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003352 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003353
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003354 nh->neigh_entry = neigh_entry;
3355 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3356 read_lock_bh(&n->lock);
3357 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003358 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003360 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003361
3362 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003363
3364err_neigh_entry_create:
3365 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003366 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003367}
3368
Ido Schimmela8c97012017-02-08 11:16:35 +01003369static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3370 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003371{
3372 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003373 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003374
Ido Schimmelb8399a12017-02-08 11:16:33 +01003375 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003376 return;
3377 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003378
Ido Schimmel58312122016-12-23 09:32:50 +01003379 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003380 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003381 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003382
3383 /* If that is the last nexthop connected to that neigh, remove from
3384 * nexthop_neighs_list
3385 */
Ido Schimmele58be792017-02-08 11:16:28 +01003386 if (list_empty(&neigh_entry->nexthop_list))
3387 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003388
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003389 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3390 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3391
3392 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003393}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003394
Petr Machata44b0fff2017-11-03 10:03:44 +01003395static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3396{
3397 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3398
3399 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3400}
3401
Petr Machatad97cda52017-11-28 13:17:13 +01003402static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3403 struct mlxsw_sp_nexthop *nh,
3404 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003405{
Petr Machata44b0fff2017-11-03 10:03:44 +01003406 bool removing;
3407
Petr Machata1012b9a2017-09-02 23:49:23 +02003408 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003409 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003410
Petr Machatad97cda52017-11-28 13:17:13 +01003411 nh->ipip_entry = ipip_entry;
3412 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003413 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003414 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003415}
3416
3417static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3418 struct mlxsw_sp_nexthop *nh)
3419{
3420 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3421
3422 if (!ipip_entry)
3423 return;
3424
3425 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003426 nh->ipip_entry = NULL;
3427}
3428
3429static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3430 const struct fib_nh *fib_nh,
3431 enum mlxsw_sp_ipip_type *p_ipipt)
3432{
3433 struct net_device *dev = fib_nh->nh_dev;
3434
3435 return dev &&
3436 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3437 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3438}
3439
Petr Machata35225e42017-09-02 23:49:22 +02003440static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3441 struct mlxsw_sp_nexthop *nh)
3442{
3443 switch (nh->type) {
3444 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3445 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3446 mlxsw_sp_nexthop_rif_fini(nh);
3447 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003448 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003449 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003450 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3451 break;
Petr Machata35225e42017-09-02 23:49:22 +02003452 }
3453}
3454
3455static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3456 struct mlxsw_sp_nexthop *nh,
3457 struct fib_nh *fib_nh)
3458{
Petr Machatad97cda52017-11-28 13:17:13 +01003459 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003460 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003461 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003462 struct mlxsw_sp_rif *rif;
3463 int err;
3464
Petr Machatad97cda52017-11-28 13:17:13 +01003465 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3466 if (ipip_entry) {
3467 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3468 if (ipip_ops->can_offload(mlxsw_sp, dev,
3469 MLXSW_SP_L3_PROTO_IPV4)) {
3470 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3471 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3472 return 0;
3473 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003474 }
3475
Petr Machata35225e42017-09-02 23:49:22 +02003476 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3477 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3478 if (!rif)
3479 return 0;
3480
3481 mlxsw_sp_nexthop_rif_init(nh, rif);
3482 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3483 if (err)
3484 goto err_neigh_init;
3485
3486 return 0;
3487
3488err_neigh_init:
3489 mlxsw_sp_nexthop_rif_fini(nh);
3490 return err;
3491}
3492
3493static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3494 struct mlxsw_sp_nexthop *nh)
3495{
3496 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3497}
3498
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003499static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3500 struct mlxsw_sp_nexthop_group *nh_grp,
3501 struct mlxsw_sp_nexthop *nh,
3502 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003503{
3504 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003505 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003506 int err;
3507
3508 nh->nh_grp = nh_grp;
3509 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003510#ifdef CONFIG_IP_ROUTE_MULTIPATH
3511 nh->nh_weight = fib_nh->nh_weight;
3512#else
3513 nh->nh_weight = 1;
3514#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003515 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003516 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3517 if (err)
3518 return err;
3519
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003520 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003521 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3522
Ido Schimmel97989ee2017-03-10 08:53:38 +01003523 if (!dev)
3524 return 0;
3525
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003526 in_dev = __in_dev_get_rtnl(dev);
3527 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3528 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3529 return 0;
3530
Petr Machata35225e42017-09-02 23:49:22 +02003531 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003532 if (err)
3533 goto err_nexthop_neigh_init;
3534
3535 return 0;
3536
3537err_nexthop_neigh_init:
3538 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3539 return err;
3540}
3541
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003542static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3543 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003544{
Petr Machata35225e42017-09-02 23:49:22 +02003545 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003546 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003547 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003548 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003549}
3550
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003551static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3552 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003553{
3554 struct mlxsw_sp_nexthop_key key;
3555 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003556
Ido Schimmel9011b672017-05-16 19:38:25 +02003557 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003558 return;
3559
3560 key.fib_nh = fib_nh;
3561 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3562 if (WARN_ON_ONCE(!nh))
3563 return;
3564
Ido Schimmelad178c82017-02-08 11:16:40 +01003565 switch (event) {
3566 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003567 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003568 break;
3569 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003570 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003571 break;
3572 }
3573
3574 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3575}
3576
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003577static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3578 struct mlxsw_sp_rif *rif)
3579{
3580 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003581 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003582
3583 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003584 switch (nh->type) {
3585 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3586 removing = false;
3587 break;
3588 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3589 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3590 break;
3591 default:
3592 WARN_ON(1);
3593 continue;
3594 }
3595
3596 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003597 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3598 }
3599}
3600
Petr Machata09dbf622017-11-28 13:17:14 +01003601static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3602 struct mlxsw_sp_rif *old_rif,
3603 struct mlxsw_sp_rif *new_rif)
3604{
3605 struct mlxsw_sp_nexthop *nh;
3606
3607 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3608 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3609 nh->rif = new_rif;
3610 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3611}
3612
Ido Schimmel9665b742017-02-08 11:16:42 +01003613static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003614 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003615{
3616 struct mlxsw_sp_nexthop *nh, *tmp;
3617
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003618 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003619 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003620 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3621 }
3622}
3623
Petr Machata9b014512017-09-02 23:49:20 +02003624static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3625 const struct fib_info *fi)
3626{
Petr Machata1012b9a2017-09-02 23:49:23 +02003627 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3628 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003629}
3630
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003631static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003632mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003633{
3634 struct mlxsw_sp_nexthop_group *nh_grp;
3635 struct mlxsw_sp_nexthop *nh;
3636 struct fib_nh *fib_nh;
3637 size_t alloc_size;
3638 int i;
3639 int err;
3640
3641 alloc_size = sizeof(*nh_grp) +
3642 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3643 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3644 if (!nh_grp)
3645 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003646 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003647 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003648 nh_grp->neigh_tbl = &arp_tbl;
3649
Petr Machata9b014512017-09-02 23:49:20 +02003650 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003651 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003652 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003653 for (i = 0; i < nh_grp->count; i++) {
3654 nh = &nh_grp->nexthops[i];
3655 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003656 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003657 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003658 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003659 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003660 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3661 if (err)
3662 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003663 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3664 return nh_grp;
3665
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003666err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003667err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003668 for (i--; i >= 0; i--) {
3669 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003670 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003671 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003672 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003673 kfree(nh_grp);
3674 return ERR_PTR(err);
3675}
3676
3677static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003678mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3679 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003680{
3681 struct mlxsw_sp_nexthop *nh;
3682 int i;
3683
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003684 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003685 for (i = 0; i < nh_grp->count; i++) {
3686 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003687 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003688 }
Ido Schimmel58312122016-12-23 09:32:50 +01003689 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3690 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003691 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003692 kfree(nh_grp);
3693}
3694
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003695static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3696 struct mlxsw_sp_fib_entry *fib_entry,
3697 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003698{
3699 struct mlxsw_sp_nexthop_group *nh_grp;
3700
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003701 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003702 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003703 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003704 if (IS_ERR(nh_grp))
3705 return PTR_ERR(nh_grp);
3706 }
3707 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3708 fib_entry->nh_group = nh_grp;
3709 return 0;
3710}
3711
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003712static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3713 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003714{
3715 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3716
3717 list_del(&fib_entry->nexthop_group_node);
3718 if (!list_empty(&nh_grp->fib_list))
3719 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003720 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003721}
3722
Ido Schimmel013b20f2017-02-08 11:16:36 +01003723static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003724mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3725{
3726 struct mlxsw_sp_fib4_entry *fib4_entry;
3727
3728 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3729 common);
3730 return !fib4_entry->tos;
3731}
3732
3733static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003734mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3735{
3736 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3737
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003738 switch (fib_entry->fib_node->fib->proto) {
3739 case MLXSW_SP_L3_PROTO_IPV4:
3740 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3741 return false;
3742 break;
3743 case MLXSW_SP_L3_PROTO_IPV6:
3744 break;
3745 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003746
Ido Schimmel013b20f2017-02-08 11:16:36 +01003747 switch (fib_entry->type) {
3748 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3749 return !!nh_group->adj_index_valid;
3750 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003751 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003752 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3753 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003754 default:
3755 return false;
3756 }
3757}
3758
Ido Schimmel428b8512017-08-03 13:28:28 +02003759static struct mlxsw_sp_nexthop *
3760mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3761 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3762{
3763 int i;
3764
3765 for (i = 0; i < nh_grp->count; i++) {
3766 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3767 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3768
3769 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3770 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3771 &rt->rt6i_gateway))
3772 return nh;
3773 continue;
3774 }
3775
3776 return NULL;
3777}
3778
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003779static void
3780mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3781{
3782 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3783 int i;
3784
Petr Machata4607f6d2017-09-02 23:49:25 +02003785 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3786 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003787 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3788 return;
3789 }
3790
3791 for (i = 0; i < nh_grp->count; i++) {
3792 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3793
3794 if (nh->offloaded)
3795 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3796 else
3797 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3798 }
3799}
3800
3801static void
3802mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3803{
3804 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3805 int i;
3806
Ido Schimmeld1c95af2018-02-17 00:30:44 +01003807 if (!list_is_singular(&nh_grp->fib_list))
3808 return;
3809
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003810 for (i = 0; i < nh_grp->count; i++) {
3811 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3812
3813 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3814 }
3815}
3816
Ido Schimmel428b8512017-08-03 13:28:28 +02003817static void
3818mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3819{
3820 struct mlxsw_sp_fib6_entry *fib6_entry;
3821 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3822
3823 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3824 common);
3825
3826 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3827 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003828 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003829 return;
3830 }
3831
3832 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3833 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3834 struct mlxsw_sp_nexthop *nh;
3835
3836 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3837 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003838 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003839 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003840 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003841 }
3842}
3843
3844static void
3845mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3846{
3847 struct mlxsw_sp_fib6_entry *fib6_entry;
3848 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3849
3850 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3851 common);
3852 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3853 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3854
Ido Schimmelfe400792017-08-15 09:09:49 +02003855 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003856 }
3857}
3858
Ido Schimmel013b20f2017-02-08 11:16:36 +01003859static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3860{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003861 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003862 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003863 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003864 break;
3865 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003866 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3867 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003868 }
3869}
3870
3871static void
3872mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3873{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003874 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003875 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003876 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003877 break;
3878 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003879 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3880 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003881 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003882}
3883
3884static void
3885mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3886 enum mlxsw_reg_ralue_op op, int err)
3887{
3888 switch (op) {
3889 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003890 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3891 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3892 if (err)
3893 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003894 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003895 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003896 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003897 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3898 return;
3899 default:
3900 return;
3901 }
3902}
3903
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003904static void
3905mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3906 const struct mlxsw_sp_fib_entry *fib_entry,
3907 enum mlxsw_reg_ralue_op op)
3908{
3909 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3910 enum mlxsw_reg_ralxx_protocol proto;
3911 u32 *p_dip;
3912
3913 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3914
3915 switch (fib->proto) {
3916 case MLXSW_SP_L3_PROTO_IPV4:
3917 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3918 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3919 fib_entry->fib_node->key.prefix_len,
3920 *p_dip);
3921 break;
3922 case MLXSW_SP_L3_PROTO_IPV6:
3923 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3924 fib_entry->fib_node->key.prefix_len,
3925 fib_entry->fib_node->key.addr);
3926 break;
3927 }
3928}
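/* The RALUE key packed here is {protocol, op, virtual router id,
 * prefix_len, destination address}; only the address encoding differs
 * between the two protocols (a single u32 for IPv4, a 16 byte buffer for
 * IPv6). The action part of the register is filled in by the per-type
 * helpers below.
 */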
3929
3930static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3931 struct mlxsw_sp_fib_entry *fib_entry,
3932 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003933{
3934 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003935 enum mlxsw_reg_ralue_trap_action trap_action;
3936 u16 trap_id = 0;
3937 u32 adjacency_index = 0;
3938 u16 ecmp_size = 0;
3939
3940 /* In case the nexthop group adjacency index is valid, use it
3941	 * with the provided ECMP size. Otherwise, set up a trap and pass
3942	 * traffic to the kernel.
3943 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003944 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003945 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3946 adjacency_index = fib_entry->nh_group->adj_index;
3947 ecmp_size = fib_entry->nh_group->ecmp_size;
3948 } else {
3949 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3950 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3951 }
3952
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003953 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003954 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3955 adjacency_index, ecmp_size);
3956 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3957}
3958
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003959static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3960 struct mlxsw_sp_fib_entry *fib_entry,
3961 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003962{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003963 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003964 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003965 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003966 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003967 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003968
3969 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3970 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003971 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003972 } else {
3973 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3974 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3975 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003976
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003977 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003978 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3979 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003980 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3981}
3982
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003983static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3984 struct mlxsw_sp_fib_entry *fib_entry,
3985 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003986{
3987 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003988
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003989 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003990 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3992}
3993
Petr Machata4607f6d2017-09-02 23:49:25 +02003994static int
3995mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3996 struct mlxsw_sp_fib_entry *fib_entry,
3997 enum mlxsw_reg_ralue_op op)
3998{
3999 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4000 const struct mlxsw_sp_ipip_ops *ipip_ops;
4001
4002 if (WARN_ON(!ipip_entry))
4003 return -EINVAL;
4004
4005 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4006 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4007 fib_entry->decap.tunnel_index);
4008}
4009
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004010static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4011 struct mlxsw_sp_fib_entry *fib_entry,
4012 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004013{
4014 switch (fib_entry->type) {
4015 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004016 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004017 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004018 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004019 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004020 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02004021 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4022 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4023 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004024 }
4025 return -EINVAL;
4026}
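/* Dispatch above: REMOTE entries are resolved through the nexthop group's
 * adjacency block (or trapped while it is invalid), LOCAL entries forward
 * via the egress RIF (or trap), TRAP entries always punt to the CPU via
 * ip2me, and IPIP_DECAP entries hand the operation to the tunnel's
 * fib_entry_op() callback together with the tunnel's decap index.
 */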
4027
4028static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4029 struct mlxsw_sp_fib_entry *fib_entry,
4030 enum mlxsw_reg_ralue_op op)
4031{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004032 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004033
Ido Schimmel013b20f2017-02-08 11:16:36 +01004034 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004035
Ido Schimmel013b20f2017-02-08 11:16:36 +01004036 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004037}
4038
4039static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4040 struct mlxsw_sp_fib_entry *fib_entry)
4041{
Jiri Pirko7146da32016-09-01 10:37:41 +02004042 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4043 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004044}
4045
4046static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4047 struct mlxsw_sp_fib_entry *fib_entry)
4048{
4049 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4050 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4051}
4052
Jiri Pirko61c503f2016-07-04 08:23:11 +02004053static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004054mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4055 const struct fib_entry_notifier_info *fen_info,
4056 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004057{
Petr Machata4607f6d2017-09-02 23:49:25 +02004058 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4059 struct net_device *dev = fen_info->fi->fib_dev;
4060 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004061 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004062
Ido Schimmel97989ee2017-03-10 08:53:38 +01004063 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004064 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004065 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4066 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004067 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004068 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4069 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4070 fib_entry,
4071 ipip_entry);
4072 }
4073 /* fall through */
4074 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004075 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4076 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004077 case RTN_UNREACHABLE: /* fall through */
4078 case RTN_BLACKHOLE: /* fall through */
4079 case RTN_PROHIBIT:
 4080	/* Packets hitting these routes need to be trapped, but with a
 4081	 * lower priority than packets directed at the host, so use
 4082	 * action type local instead of trap.
 4083	 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004084 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004085 return 0;
4086 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004087 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004088 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004089 else
4090 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004091 return 0;
4092 default:
4093 return -EINVAL;
4094 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004095}
4096
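/* Allocate an IPv4 FIB entry for the notified route: resolve its type,
 * bind it to a (possibly shared) nexthop group and record the
 * attributes (priority, table ID, type, TOS) later used for lookup and
 * for ordering entries inside the FIB node.
 */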
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004097static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004098mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4099 struct mlxsw_sp_fib_node *fib_node,
4100 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004101{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004102 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004103 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004104 int err;
4105
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004106 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4107 if (!fib4_entry)
4108 return ERR_PTR(-ENOMEM);
4109 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004110
4111 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4112 if (err)
4113 goto err_fib4_entry_type_set;
4114
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004115 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004116 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004117 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004119 fib4_entry->prio = fen_info->fi->fib_priority;
4120 fib4_entry->tb_id = fen_info->tb_id;
4121 fib4_entry->type = fen_info->type;
4122 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004123
4124 fib_entry->fib_node = fib_node;
4125
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004126 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004127
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004128err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004129err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004130 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004131 return ERR_PTR(err);
4132}
4133
4134static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004135 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004136{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004137 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004138 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004139}
4140
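/* Map a kernel FIB notification back to the driver's entry: find the
 * virtual router by table ID, the FIB node by prefix and prefix
 * length, and then the entry whose table ID, TOS, type and fib_info
 * all match.
 */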
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004141static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004142mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4143 const struct fib_entry_notifier_info *fen_info)
4144{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004145 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004146 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004147 struct mlxsw_sp_fib *fib;
4148 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004149
Ido Schimmel160e22a2017-07-18 10:10:20 +02004150 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4151 if (!vr)
4152 return NULL;
4153 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4154
4155 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4156 sizeof(fen_info->dst),
4157 fen_info->dst_len);
4158 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004159 return NULL;
4160
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004161 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4162 if (fib4_entry->tb_id == fen_info->tb_id &&
4163 fib4_entry->tos == fen_info->tos &&
4164 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004165 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4166 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004167 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004168 }
4169 }
4170
4171 return NULL;
4172}
4173
4174static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4175 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4176 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4177 .key_len = sizeof(struct mlxsw_sp_fib_key),
4178 .automatic_shrinking = true,
4179};
4180
4181static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4182 struct mlxsw_sp_fib_node *fib_node)
4183{
4184 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4185 mlxsw_sp_fib_ht_params);
4186}
4187
4188static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4189 struct mlxsw_sp_fib_node *fib_node)
4190{
4191 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4192 mlxsw_sp_fib_ht_params);
4193}
4194
4195static struct mlxsw_sp_fib_node *
4196mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4197 size_t addr_len, unsigned char prefix_len)
4198{
4199 struct mlxsw_sp_fib_key key;
4200
4201 memset(&key, 0, sizeof(key));
4202 memcpy(key.addr, addr, addr_len);
4203 key.prefix_len = prefix_len;
4204 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4205}
4206
4207static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004208mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004209 size_t addr_len, unsigned char prefix_len)
4210{
4211 struct mlxsw_sp_fib_node *fib_node;
4212
4213 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4214 if (!fib_node)
4215 return NULL;
4216
4217 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004218 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004219 memcpy(fib_node->key.addr, addr, addr_len);
4220 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004221
4222 return fib_node;
4223}
4224
4225static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4226{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004227 list_del(&fib_node->list);
4228 WARN_ON(!list_empty(&fib_node->entry_list));
4229 kfree(fib_node);
4230}
4231
4232static bool
4233mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4234 const struct mlxsw_sp_fib_entry *fib_entry)
4235{
4236 return list_first_entry(&fib_node->entry_list,
4237 struct mlxsw_sp_fib_entry, list) == fib_entry;
4238}
4239
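/* Make sure the LPM tree used for this protocol covers the node's
 * prefix length. If the prefix length is already referenced, only bump
 * its reference count; otherwise get a tree that also includes it and
 * have the virtual routers switch over to the new tree.
 */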
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004240static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004241 struct mlxsw_sp_fib_node *fib_node)
4242{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004243 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004244 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004245 struct mlxsw_sp_lpm_tree *lpm_tree;
4246 int err;
4247
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004248 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4249 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4250 goto out;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004251
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004252 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4253 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004254 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4255 fib->proto);
4256 if (IS_ERR(lpm_tree))
4257 return PTR_ERR(lpm_tree);
4258
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004259 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4260 if (err)
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004261 goto err_lpm_tree_replace;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004262
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004263out:
4264 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004265 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004266
4267err_lpm_tree_replace:
4268 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4269 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004270}
4271
4272static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004273 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004274{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004275 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4276 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004277 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004278 int err;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004279
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004280 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004281 return;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004282 /* Try to construct a new LPM tree from the current prefix usage
4283 * minus the unused one. If we fail, continue using the old one.
Ido Schimmel4fd00312018-01-22 09:17:40 +01004284 */
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004285 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4286 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4287 fib_node->key.prefix_len);
4288 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4289 fib->proto);
4290 if (IS_ERR(lpm_tree))
4291 return;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004292
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004293 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4294 if (err)
4295 goto err_lpm_tree_replace;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004296
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004297 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004298
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004299err_lpm_tree_replace:
4300 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004301}
4302
Ido Schimmel76610eb2017-03-10 08:53:41 +01004303static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4304 struct mlxsw_sp_fib_node *fib_node,
4305 struct mlxsw_sp_fib *fib)
4306{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004307 int err;
4308
4309 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4310 if (err)
4311 return err;
4312 fib_node->fib = fib;
4313
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004314 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004315 if (err)
4316 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004317
Ido Schimmel76610eb2017-03-10 08:53:41 +01004318 return 0;
4319
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004320err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004321 fib_node->fib = NULL;
4322 mlxsw_sp_fib_node_remove(fib, fib_node);
4323 return err;
4324}
4325
4326static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4327 struct mlxsw_sp_fib_node *fib_node)
4328{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004329 struct mlxsw_sp_fib *fib = fib_node->fib;
4330
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004331 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004332 fib_node->fib = NULL;
4333 mlxsw_sp_fib_node_remove(fib, fib_node);
4334}
4335
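/* Get-or-create semantics for FIB nodes: take a reference on the
 * virtual router, look the node up by prefix, and only on a miss
 * create a new node and initialize it (hash table insertion and LPM
 * tree linkage).
 */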
Ido Schimmel9aecce12017-02-09 10:28:42 +01004336static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004337mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4338 size_t addr_len, unsigned char prefix_len,
4339 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004340{
4341 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004342 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004343 struct mlxsw_sp_vr *vr;
4344 int err;
4345
David Ahernf8fa9b42017-10-18 09:56:56 -07004346 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004347 if (IS_ERR(vr))
4348 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004349 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004350
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004351 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004352 if (fib_node)
4353 return fib_node;
4354
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004355 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004356 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004357 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004358 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004359 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004360
Ido Schimmel76610eb2017-03-10 08:53:41 +01004361 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4362 if (err)
4363 goto err_fib_node_init;
4364
Ido Schimmel9aecce12017-02-09 10:28:42 +01004365 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004366
Ido Schimmel76610eb2017-03-10 08:53:41 +01004367err_fib_node_init:
4368 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004369err_fib_node_create:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004370 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004371 return ERR_PTR(err);
4372}
4373
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004374static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4375 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004376{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004377 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004378
Ido Schimmel9aecce12017-02-09 10:28:42 +01004379 if (!list_empty(&fib_node->entry_list))
4380 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004381 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004382 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004383 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004384}
4385
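/* Entries inside a FIB node are kept ordered by table ID, TOS and
 * priority, so that the preferred route is first and is the one
 * reflected in the device. Return the existing entry before which the
 * new entry should be inserted, or NULL if no such entry exists.
 */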
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004386static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004387mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004388 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004389{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004390 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004391
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004392 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4393 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004394 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004395 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004396 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004397 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004398 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004399 if (fib4_entry->prio >= new4_entry->prio ||
4400 fib4_entry->tos < new4_entry->tos)
4401 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004402 }
4403
4404 return NULL;
4405}
4406
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004407static int
4408mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4409 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004410{
4411 struct mlxsw_sp_fib_node *fib_node;
4412
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004413 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004414 return -EINVAL;
4415
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004416 fib_node = fib4_entry->common.fib_node;
4417 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4418 common.list) {
4419 if (fib4_entry->tb_id != new4_entry->tb_id ||
4420 fib4_entry->tos != new4_entry->tos ||
4421 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004422 break;
4423 }
4424
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004425 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004426 return 0;
4427}
4428
Ido Schimmel9aecce12017-02-09 10:28:42 +01004429static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004430mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004431 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004432{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004433 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004434 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004435
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004436 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004437
Ido Schimmel4283bce2017-02-09 10:28:43 +01004438 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004439 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4440 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004441 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004442
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004443	/* Insert the new entry before the replaced one, so that the
 4444	 * replaced entry can later be removed.
 4445	 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004446 if (fib4_entry) {
4447 list_add_tail(&new4_entry->common.list,
4448 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004449 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004450 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004451
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004452 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4453 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004454 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004455 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004456 }
4457
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004458 if (fib4_entry)
4459 list_add(&new4_entry->common.list,
4460 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004461 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004462 list_add(&new4_entry->common.list,
4463 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004464 }
4465
4466 return 0;
4467}
4468
4469static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004470mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004471{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004472 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004473}
4474
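/* Only the first entry in a FIB node's list is reflected in the
 * device. When a new entry becomes the first one, write it to the
 * device and clear the offload indication of the entry it shadows.
 */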
Ido Schimmel80c238f2017-07-18 10:10:29 +02004475static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4476 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004477{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004478 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4479
Ido Schimmel9aecce12017-02-09 10:28:42 +01004480 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4481 return 0;
4482
4483 /* To prevent packet loss, overwrite the previously offloaded
4484 * entry.
4485 */
4486 if (!list_is_singular(&fib_node->entry_list)) {
4487 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4488 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4489
4490 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4491 }
4492
4493 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4494}
4495
Ido Schimmel80c238f2017-07-18 10:10:29 +02004496static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4497 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004498{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004499 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4500
Ido Schimmel9aecce12017-02-09 10:28:42 +01004501 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4502 return;
4503
4504 /* Promote the next entry by overwriting the deleted entry */
4505 if (!list_is_singular(&fib_node->entry_list)) {
4506 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4507 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4508
4509 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4510 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4511 return;
4512 }
4513
4514 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4515}
4516
4517static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004518 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004519 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004520{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004521 int err;
4522
Ido Schimmel9efbee62017-07-18 10:10:28 +02004523 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004524 if (err)
4525 return err;
4526
Ido Schimmel80c238f2017-07-18 10:10:29 +02004527 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004528 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004529 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004530
Ido Schimmel9aecce12017-02-09 10:28:42 +01004531 return 0;
4532
Ido Schimmel80c238f2017-07-18 10:10:29 +02004533err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004534 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004535 return err;
4536}
4537
4538static void
4539mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004540 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004541{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004542 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004543 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004544
4545 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4546 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004547}
4548
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004549static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004550 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004551 bool replace)
4552{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004553 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4554 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004555
4556 if (!replace)
4557 return;
4558
 4559	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004560 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004561
4562 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4563 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004564 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004565}
4566
Ido Schimmel9aecce12017-02-09 10:28:42 +01004567static int
4568mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004569 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004570 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004571{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004572 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004573 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004574 int err;
4575
Ido Schimmel9011b672017-05-16 19:38:25 +02004576 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004577 return 0;
4578
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004579 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4580 &fen_info->dst, sizeof(fen_info->dst),
4581 fen_info->dst_len,
4582 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004583 if (IS_ERR(fib_node)) {
4584 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4585 return PTR_ERR(fib_node);
4586 }
4587
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004588 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4589 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004590 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004591 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004592 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004593 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004594
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004595 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004596 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004597 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004598 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4599 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004600 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004601
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004602 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004603
Jiri Pirko61c503f2016-07-04 08:23:11 +02004604 return 0;
4605
Ido Schimmel9aecce12017-02-09 10:28:42 +01004606err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004607 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004608err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004609 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004610 return err;
4611}
4612
Jiri Pirko37956d72016-10-20 16:05:43 +02004613static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4614 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004615{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004616 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004617 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004618
Ido Schimmel9011b672017-05-16 19:38:25 +02004619 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004620 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004621
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004622 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4623 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004624 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004625 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004626
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004627 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4628 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004629 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004630}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004631
Ido Schimmel428b8512017-08-03 13:28:28 +02004632static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4633{
 4634	/* Packets with a link-local destination IP arriving at the router
 4635	 * are trapped to the CPU, so there is no need to program specific
 4636	 * routes for them.
 4637	 */
4638 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4639 return true;
4640
4641 /* Multicast routes aren't supported, so ignore them. Neighbour
4642 * Discovery packets are specifically trapped.
4643 */
4644 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4645 return true;
4646
4647 /* Cloned routes are irrelevant in the forwarding path. */
4648 if (rt->rt6i_flags & RTF_CACHE)
4649 return true;
4650
4651 return false;
4652}
4653
4654static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4655{
4656 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4657
4658 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4659 if (!mlxsw_sp_rt6)
4660 return ERR_PTR(-ENOMEM);
4661
 4662	/* In case of route replace, the replaced route is deleted
 4663	 * without notification. Take a reference to prevent accessing
 4664	 * freed memory.
 4665	 */
4666 mlxsw_sp_rt6->rt = rt;
4667 rt6_hold(rt);
4668
4669 return mlxsw_sp_rt6;
4670}
4671
4672#if IS_ENABLED(CONFIG_IPV6)
4673static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4674{
4675 rt6_release(rt);
4676}
4677#else
4678static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4679{
4680}
4681#endif
4682
4683static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4684{
4685 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4686 kfree(mlxsw_sp_rt6);
4687}
4688
4689static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4690{
4691 /* RTF_CACHE routes are ignored */
4692 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4693}
4694
4695static struct rt6_info *
4696mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4697{
4698 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4699 list)->rt;
4700}
4701
4702static struct mlxsw_sp_fib6_entry *
4703mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004704 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004705{
4706 struct mlxsw_sp_fib6_entry *fib6_entry;
4707
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004708 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004709 return NULL;
4710
4711 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4712 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4713
4714 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4715 * virtual router.
4716 */
4717 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4718 continue;
4719 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4720 break;
4721 if (rt->rt6i_metric < nrt->rt6i_metric)
4722 continue;
4723 if (rt->rt6i_metric == nrt->rt6i_metric &&
4724 mlxsw_sp_fib6_rt_can_mp(rt))
4725 return fib6_entry;
4726 if (rt->rt6i_metric > nrt->rt6i_metric)
4727 break;
4728 }
4729
4730 return NULL;
4731}
4732
4733static struct mlxsw_sp_rt6 *
4734mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4735 const struct rt6_info *rt)
4736{
4737 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4738
4739 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4740 if (mlxsw_sp_rt6->rt == rt)
4741 return mlxsw_sp_rt6;
4742 }
4743
4744 return NULL;
4745}
4746
Petr Machata8f28a302017-09-02 23:49:24 +02004747static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4748 const struct rt6_info *rt,
4749 enum mlxsw_sp_ipip_type *ret)
4750{
4751 return rt->dst.dev &&
4752 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4753}
4754
Petr Machata35225e42017-09-02 23:49:22 +02004755static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4756 struct mlxsw_sp_nexthop_group *nh_grp,
4757 struct mlxsw_sp_nexthop *nh,
4758 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004759{
Petr Machatad97cda52017-11-28 13:17:13 +01004760 const struct mlxsw_sp_ipip_ops *ipip_ops;
4761 struct mlxsw_sp_ipip_entry *ipip_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004762 struct net_device *dev = rt->dst.dev;
4763 struct mlxsw_sp_rif *rif;
4764 int err;
4765
Petr Machatad97cda52017-11-28 13:17:13 +01004766 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4767 if (ipip_entry) {
4768 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4769 if (ipip_ops->can_offload(mlxsw_sp, dev,
4770 MLXSW_SP_L3_PROTO_IPV6)) {
4771 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4772 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4773 return 0;
4774 }
Petr Machata8f28a302017-09-02 23:49:24 +02004775 }
4776
Petr Machata35225e42017-09-02 23:49:22 +02004777 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004778 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4779 if (!rif)
4780 return 0;
4781 mlxsw_sp_nexthop_rif_init(nh, rif);
4782
4783 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4784 if (err)
4785 goto err_nexthop_neigh_init;
4786
4787 return 0;
4788
4789err_nexthop_neigh_init:
4790 mlxsw_sp_nexthop_rif_fini(nh);
4791 return err;
4792}
4793
Petr Machata35225e42017-09-02 23:49:22 +02004794static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4795 struct mlxsw_sp_nexthop *nh)
4796{
4797 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4798}
4799
4800static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4801 struct mlxsw_sp_nexthop_group *nh_grp,
4802 struct mlxsw_sp_nexthop *nh,
4803 const struct rt6_info *rt)
4804{
4805 struct net_device *dev = rt->dst.dev;
4806
4807 nh->nh_grp = nh_grp;
Ido Schimmel3743d882018-01-12 17:15:59 +01004808 nh->nh_weight = rt->rt6i_nh_weight;
Petr Machata35225e42017-09-02 23:49:22 +02004809 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004810 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004811
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004812 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4813
Petr Machata35225e42017-09-02 23:49:22 +02004814 if (!dev)
4815 return 0;
4816 nh->ifindex = dev->ifindex;
4817
4818 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4819}
4820
Ido Schimmel428b8512017-08-03 13:28:28 +02004821static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4822 struct mlxsw_sp_nexthop *nh)
4823{
Petr Machata35225e42017-09-02 23:49:22 +02004824 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004825 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004826 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004827}
4828
Petr Machataf6050ee2017-09-02 23:49:21 +02004829static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4830 const struct rt6_info *rt)
4831{
Petr Machata8f28a302017-09-02 23:49:24 +02004832 return rt->rt6i_flags & RTF_GATEWAY ||
4833 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004834}
4835
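/* Build a nexthop group for an IPv6 entry: one nexthop per rt6 on the
 * entry's list, neighbour resolution through the IPv6 ND table, and
 * the finished group inserted into the group hash so that equivalent
 * entries can share it.
 */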
Ido Schimmel428b8512017-08-03 13:28:28 +02004836static struct mlxsw_sp_nexthop_group *
4837mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4838 struct mlxsw_sp_fib6_entry *fib6_entry)
4839{
4840 struct mlxsw_sp_nexthop_group *nh_grp;
4841 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4842 struct mlxsw_sp_nexthop *nh;
4843 size_t alloc_size;
4844 int i = 0;
4845 int err;
4846
4847 alloc_size = sizeof(*nh_grp) +
4848 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4849 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4850 if (!nh_grp)
4851 return ERR_PTR(-ENOMEM);
4852 INIT_LIST_HEAD(&nh_grp->fib_list);
4853#if IS_ENABLED(CONFIG_IPV6)
4854 nh_grp->neigh_tbl = &nd_tbl;
4855#endif
4856 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4857 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004858 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004859 nh_grp->count = fib6_entry->nrt6;
4860 for (i = 0; i < nh_grp->count; i++) {
4861 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4862
4863 nh = &nh_grp->nexthops[i];
4864 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4865 if (err)
4866 goto err_nexthop6_init;
4867 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4868 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004869
4870 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4871 if (err)
4872 goto err_nexthop_group_insert;
4873
Ido Schimmel428b8512017-08-03 13:28:28 +02004874 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4875 return nh_grp;
4876
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004877err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004878err_nexthop6_init:
4879 for (i--; i >= 0; i--) {
4880 nh = &nh_grp->nexthops[i];
4881 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4882 }
4883 kfree(nh_grp);
4884 return ERR_PTR(err);
4885}
4886
4887static void
4888mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4889 struct mlxsw_sp_nexthop_group *nh_grp)
4890{
4891 struct mlxsw_sp_nexthop *nh;
4892 int i = nh_grp->count;
4893
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004894 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004895 for (i--; i >= 0; i--) {
4896 nh = &nh_grp->nexthops[i];
4897 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4898 }
4899 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4900 WARN_ON(nh_grp->adj_index_valid);
4901 kfree(nh_grp);
4902}
4903
4904static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4905 struct mlxsw_sp_fib6_entry *fib6_entry)
4906{
4907 struct mlxsw_sp_nexthop_group *nh_grp;
4908
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004909 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4910 if (!nh_grp) {
4911 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4912 if (IS_ERR(nh_grp))
4913 return PTR_ERR(nh_grp);
4914 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004915
4916 list_add_tail(&fib6_entry->common.nexthop_group_node,
4917 &nh_grp->fib_list);
4918 fib6_entry->common.nh_group = nh_grp;
4919
4920 return 0;
4921}
4922
4923static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4924 struct mlxsw_sp_fib_entry *fib_entry)
4925{
4926 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4927
4928 list_del(&fib_entry->nexthop_group_node);
4929 if (!list_empty(&nh_grp->fib_list))
4930 return;
4931 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4932}
4933
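/* Re-evaluate the nexthop group of an IPv6 entry after its rt6 list
 * changed: get a group matching the new list, rewrite the entry in the
 * device so that it points at the new adjacency entries, and only then
 * release the old group.
 */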
4934static int
4935mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4936 struct mlxsw_sp_fib6_entry *fib6_entry)
4937{
4938 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4939 int err;
4940
4941 fib6_entry->common.nh_group = NULL;
4942 list_del(&fib6_entry->common.nexthop_group_node);
4943
4944 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4945 if (err)
4946 goto err_nexthop6_group_get;
4947
 4948	/* If this entry is offloaded, the adjacency index currently
 4949	 * associated with it in the device's table is that of the old
 4950	 * group. Start using the new one instead.
 4951	 */
4952 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4953 if (err)
4954 goto err_fib_node_entry_add;
4955
4956 if (list_empty(&old_nh_grp->fib_list))
4957 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4958
4959 return 0;
4960
4961err_fib_node_entry_add:
4962 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4963err_nexthop6_group_get:
4964 list_add_tail(&fib6_entry->common.nexthop_group_node,
4965 &old_nh_grp->fib_list);
4966 fib6_entry->common.nh_group = old_nh_grp;
4967 return err;
4968}
4969
4970static int
4971mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4972 struct mlxsw_sp_fib6_entry *fib6_entry,
4973 struct rt6_info *rt)
4974{
4975 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4976 int err;
4977
4978 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4979 if (IS_ERR(mlxsw_sp_rt6))
4980 return PTR_ERR(mlxsw_sp_rt6);
4981
4982 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4983 fib6_entry->nrt6++;
4984
4985 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4986 if (err)
4987 goto err_nexthop6_group_update;
4988
4989 return 0;
4990
4991err_nexthop6_group_update:
4992 fib6_entry->nrt6--;
4993 list_del(&mlxsw_sp_rt6->list);
4994 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4995 return err;
4996}
4997
4998static void
4999mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5000 struct mlxsw_sp_fib6_entry *fib6_entry,
5001 struct rt6_info *rt)
5002{
5003 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5004
5005 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5006 if (WARN_ON(!mlxsw_sp_rt6))
5007 return;
5008
5009 fib6_entry->nrt6--;
5010 list_del(&mlxsw_sp_rt6->list);
5011 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5012 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5013}
5014
Petr Machataf6050ee2017-09-02 23:49:21 +02005015static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5016 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02005017 const struct rt6_info *rt)
5018{
5019 /* Packets hitting RTF_REJECT routes need to be discarded by the
5020 * stack. We can rely on their destination device not having a
5021 * RIF (it's the loopback device) and can thus use action type
5022 * local, which will cause them to be trapped with a lower
5023 * priority than packets that need to be locally received.
5024 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02005025 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02005026 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5027 else if (rt->rt6i_flags & RTF_REJECT)
5028 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02005029 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02005030 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5031 else
5032 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5033}
5034
5035static void
5036mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5037{
5038 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5039
5040 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5041 list) {
5042 fib6_entry->nrt6--;
5043 list_del(&mlxsw_sp_rt6->list);
5044 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5045 }
5046}
5047
5048static struct mlxsw_sp_fib6_entry *
5049mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5050 struct mlxsw_sp_fib_node *fib_node,
5051 struct rt6_info *rt)
5052{
5053 struct mlxsw_sp_fib6_entry *fib6_entry;
5054 struct mlxsw_sp_fib_entry *fib_entry;
5055 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5056 int err;
5057
5058 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5059 if (!fib6_entry)
5060 return ERR_PTR(-ENOMEM);
5061 fib_entry = &fib6_entry->common;
5062
5063 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5064 if (IS_ERR(mlxsw_sp_rt6)) {
5065 err = PTR_ERR(mlxsw_sp_rt6);
5066 goto err_rt6_create;
5067 }
5068
Petr Machataf6050ee2017-09-02 23:49:21 +02005069 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005070
5071 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5072 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5073 fib6_entry->nrt6 = 1;
5074 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5075 if (err)
5076 goto err_nexthop6_group_get;
5077
5078 fib_entry->fib_node = fib_node;
5079
5080 return fib6_entry;
5081
5082err_nexthop6_group_get:
5083 list_del(&mlxsw_sp_rt6->list);
5084 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5085err_rt6_create:
5086 kfree(fib6_entry);
5087 return ERR_PTR(err);
5088}
5089
5090static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5091 struct mlxsw_sp_fib6_entry *fib6_entry)
5092{
5093 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5094 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5095 WARN_ON(fib6_entry->nrt6);
5096 kfree(fib6_entry);
5097}
5098
5099static struct mlxsw_sp_fib6_entry *
5100mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005101 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005102{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005103 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005104
5105 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5106 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5107
5108 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
5109 continue;
5110 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
5111 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005112 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
5113 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5114 mlxsw_sp_fib6_rt_can_mp(nrt))
5115 return fib6_entry;
5116 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5117 fallback = fallback ?: fib6_entry;
5118 }
Ido Schimmel428b8512017-08-03 13:28:28 +02005119 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005120 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005121 }
5122
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005123 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02005124}
5125
5126static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005127mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5128 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005129{
5130 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5131 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5132 struct mlxsw_sp_fib6_entry *fib6_entry;
5133
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005134 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5135
5136 if (replace && WARN_ON(!fib6_entry))
5137 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005138
5139 if (fib6_entry) {
5140 list_add_tail(&new6_entry->common.list,
5141 &fib6_entry->common.list);
5142 } else {
5143 struct mlxsw_sp_fib6_entry *last;
5144
5145 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5146 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5147
5148 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5149 break;
5150 fib6_entry = last;
5151 }
5152
5153 if (fib6_entry)
5154 list_add(&new6_entry->common.list,
5155 &fib6_entry->common.list);
5156 else
5157 list_add(&new6_entry->common.list,
5158 &fib_node->entry_list);
5159 }
5160
5161 return 0;
5162}
5163
5164static void
5165mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5166{
5167 list_del(&fib6_entry->common.list);
5168}
5169
5170static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005171 struct mlxsw_sp_fib6_entry *fib6_entry,
5172 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005173{
5174 int err;
5175
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005176 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005177 if (err)
5178 return err;
5179
5180 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5181 if (err)
5182 goto err_fib_node_entry_add;
5183
5184 return 0;
5185
5186err_fib_node_entry_add:
5187 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5188 return err;
5189}
5190
5191static void
5192mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5193 struct mlxsw_sp_fib6_entry *fib6_entry)
5194{
5195 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5196 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5197}
5198
5199static struct mlxsw_sp_fib6_entry *
5200mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5201 const struct rt6_info *rt)
5202{
5203 struct mlxsw_sp_fib6_entry *fib6_entry;
5204 struct mlxsw_sp_fib_node *fib_node;
5205 struct mlxsw_sp_fib *fib;
5206 struct mlxsw_sp_vr *vr;
5207
5208 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5209 if (!vr)
5210 return NULL;
5211 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5212
5213 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5214 sizeof(rt->rt6i_dst.addr),
5215 rt->rt6i_dst.plen);
5216 if (!fib_node)
5217 return NULL;
5218
5219 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5220 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5221
5222 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5223 rt->rt6i_metric == iter_rt->rt6i_metric &&
5224 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5225 return fib6_entry;
5226 }
5227
5228 return NULL;
5229}
5230
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005231static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5232 struct mlxsw_sp_fib6_entry *fib6_entry,
5233 bool replace)
5234{
5235 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5236 struct mlxsw_sp_fib6_entry *replaced;
5237
5238 if (!replace)
5239 return;
5240
5241 replaced = list_next_entry(fib6_entry, common.list);
5242
5243 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5244 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5245 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5246}
5247
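/* IPv6 route addition. Source-specific routes are not supported and
 * are rejected; link-local, multicast and cloned routes are ignored.
 * When possible the route is appended to an existing multipath entry,
 * otherwise a new entry is created and linked into the FIB node.
 */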
Ido Schimmel428b8512017-08-03 13:28:28 +02005248static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005249 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005250{
5251 struct mlxsw_sp_fib6_entry *fib6_entry;
5252 struct mlxsw_sp_fib_node *fib_node;
5253 int err;
5254
5255 if (mlxsw_sp->router->aborted)
5256 return 0;
5257
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005258 if (rt->rt6i_src.plen)
5259 return -EINVAL;
5260
Ido Schimmel428b8512017-08-03 13:28:28 +02005261 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5262 return 0;
5263
5264 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5265 &rt->rt6i_dst.addr,
5266 sizeof(rt->rt6i_dst.addr),
5267 rt->rt6i_dst.plen,
5268 MLXSW_SP_L3_PROTO_IPV6);
5269 if (IS_ERR(fib_node))
5270 return PTR_ERR(fib_node);
5271
 5272	/* Before creating a new entry, try to append the route to an
 5273	 * existing multipath entry.
 5274	 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005275 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005276 if (fib6_entry) {
5277 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5278 if (err)
5279 goto err_fib6_entry_nexthop_add;
5280 return 0;
5281 }
5282
5283 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5284 if (IS_ERR(fib6_entry)) {
5285 err = PTR_ERR(fib6_entry);
5286 goto err_fib6_entry_create;
5287 }
5288
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005289 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005290 if (err)
5291 goto err_fib6_node_entry_link;
5292
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005293 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5294
Ido Schimmel428b8512017-08-03 13:28:28 +02005295 return 0;
5296
5297err_fib6_node_entry_link:
5298 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5299err_fib6_entry_create:
5300err_fib6_entry_nexthop_add:
5301 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5302 return err;
5303}
5304
5305static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5306 struct rt6_info *rt)
5307{
5308 struct mlxsw_sp_fib6_entry *fib6_entry;
5309 struct mlxsw_sp_fib_node *fib_node;
5310
5311 if (mlxsw_sp->router->aborted)
5312 return;
5313
5314 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5315 return;
5316
5317 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5318 if (WARN_ON(!fib6_entry))
5319 return;
5320
 5321	/* If the route is part of a multipath entry, but not the last
 5322	 * one removed, then only shrink its nexthop group.
 5323	 */
5324 if (!list_is_singular(&fib6_entry->rt6_list)) {
5325 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5326 return;
5327 }
5328
5329 fib_node = fib6_entry->common.fib_node;
5330
5331 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5332 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5333 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5334}
5335
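/* Program the abort configuration for one protocol: bind a minimal LPM
 * tree and install a default (zero prefix length) route in every
 * virtual router with an ip2me action, so that all packets are trapped
 * to the CPU and forwarded by the kernel instead of the device.
 */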
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005336static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5337 enum mlxsw_reg_ralxx_protocol proto,
5338 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005339{
5340 char ralta_pl[MLXSW_REG_RALTA_LEN];
5341 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005342 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005343
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005344 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005345 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5346 if (err)
5347 return err;
5348
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005349 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005350 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5351 if (err)
5352 return err;
5353
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005354 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005355 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005356 char raltb_pl[MLXSW_REG_RALTB_LEN];
5357 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005358
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005359 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005360 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5361 raltb_pl);
5362 if (err)
5363 return err;
5364
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005365 mlxsw_reg_ralue_pack(ralue_pl, proto,
5366 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005367 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5368 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5369 ralue_pl);
5370 if (err)
5371 return err;
5372 }
5373
5374 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005375}
5376
Yotam Gigid42b0962017-09-27 08:23:20 +02005377static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5378 struct mfc_entry_notifier_info *men_info,
5379 bool replace)
5380{
5381 struct mlxsw_sp_vr *vr;
5382
5383 if (mlxsw_sp->router->aborted)
5384 return 0;
5385
David Ahernf8fa9b42017-10-18 09:56:56 -07005386 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005387 if (IS_ERR(vr))
5388 return PTR_ERR(vr);
5389
5390 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5391}
5392
5393static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5394 struct mfc_entry_notifier_info *men_info)
5395{
5396 struct mlxsw_sp_vr *vr;
5397
5398 if (mlxsw_sp->router->aborted)
5399 return;
5400
5401 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5402 if (WARN_ON(!vr))
5403 return;
5404
5405 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005406 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005407}
5408
5409static int
5410mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5411 struct vif_entry_notifier_info *ven_info)
5412{
5413 struct mlxsw_sp_rif *rif;
5414 struct mlxsw_sp_vr *vr;
5415
5416 if (mlxsw_sp->router->aborted)
5417 return 0;
5418
David Ahernf8fa9b42017-10-18 09:56:56 -07005419 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005420 if (IS_ERR(vr))
5421 return PTR_ERR(vr);
5422
5423 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5424 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5425 ven_info->vif_index,
5426 ven_info->vif_flags, rif);
5427}
5428
5429static void
5430mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5431 struct vif_entry_notifier_info *ven_info)
5432{
5433 struct mlxsw_sp_vr *vr;
5434
5435 if (mlxsw_sp->router->aborted)
5436 return;
5437
5438 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5439 if (WARN_ON(!vr))
5440 return;
5441
5442 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005443 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005444}
5445
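/* Install the IPv4 and IPv6 "abort" defaults: each protocol is bound to its
 * own LPM tree holding a single default route whose action is to trap
 * packets to the CPU, so traffic keeps being forwarded by the kernel in
 * software once offloading is aborted.
 */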
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005446static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5447{
5448 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5449 int err;
5450
5451 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5452 MLXSW_SP_LPM_TREE_MIN);
5453 if (err)
5454 return err;
5455
	/* The multicast router code does not need an abort trap, since packets
	 * that do not match any route are trapped to the CPU by default.
	 */
5459
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005460 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5461 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5462 MLXSW_SP_LPM_TREE_MIN + 1);
5463}
5464
Ido Schimmel9aecce12017-02-09 10:28:42 +01005465static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5466 struct mlxsw_sp_fib_node *fib_node)
5467{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005468 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005469
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005470 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5471 common.list) {
5472 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005473
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005474 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5475 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005476 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005477 /* Break when entry list is empty and node was freed.
5478 * Otherwise, we'll access freed memory in the next
5479 * iteration.
5480 */
5481 if (do_break)
5482 break;
5483 }
5484}
5485
Ido Schimmel428b8512017-08-03 13:28:28 +02005486static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5487 struct mlxsw_sp_fib_node *fib_node)
5488{
5489 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5490
5491 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5492 common.list) {
5493 bool do_break = &tmp->common.list == &fib_node->entry_list;
5494
5495 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5496 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5497 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5498 if (do_break)
5499 break;
5500 }
5501}
5502
Ido Schimmel9aecce12017-02-09 10:28:42 +01005503static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5504 struct mlxsw_sp_fib_node *fib_node)
5505{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005506 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005507 case MLXSW_SP_L3_PROTO_IPV4:
5508 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5509 break;
5510 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005511 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005512 break;
5513 }
5514}
5515
Ido Schimmel76610eb2017-03-10 08:53:41 +01005516static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5517 struct mlxsw_sp_vr *vr,
5518 enum mlxsw_sp_l3proto proto)
5519{
5520 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5521 struct mlxsw_sp_fib_node *fib_node, *tmp;
5522
5523 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5524 bool do_break = &tmp->list == &fib->node_list;
5525
5526 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5527 if (do_break)
5528 break;
5529 }
5530}
5531
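/* Flush all offloaded routes: for every virtual router in use, flush its
 * multicast table and its IPv4 FIB, and, if the virtual router is still in
 * use afterwards, its IPv6 FIB as well. Called when FIB offloading is
 * aborted.
 */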
Ido Schimmelac571de2016-11-14 11:26:32 +01005532static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005533{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005534 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005535
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005536 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005537 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005538
Ido Schimmel76610eb2017-03-10 08:53:41 +01005539 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005540 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005541
5542 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005543 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005544
		/* If the virtual router was only used for IPv4, then it is no
		 * longer in use and there is nothing to flush for IPv6.
		 */
5548 if (!mlxsw_sp_vr_is_used(vr))
5549 continue;
5550 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005551 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005552}
5553
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005554static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005555{
5556 int err;
5557
Ido Schimmel9011b672017-05-16 19:38:25 +02005558 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005559 return;
5560 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005561 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005562 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005563 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5564 if (err)
5565 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5566}
5567
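/* FIB notifications arrive in atomic context. The notifier therefore only
 * copies the relevant information into a work item, taking references on the
 * underlying objects, and the actual processing is deferred to process
 * context, where it runs under RTNL.
 */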
Ido Schimmel30572242016-12-03 16:45:01 +01005568struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005569 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005570 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005571 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005572 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005573 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005574 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005575 struct mfc_entry_notifier_info men_info;
5576 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005577 };
Ido Schimmel30572242016-12-03 16:45:01 +01005578 struct mlxsw_sp *mlxsw_sp;
5579 unsigned long event;
5580};
5581
Ido Schimmel66a57632017-08-03 13:28:26 +02005582static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005583{
Ido Schimmel30572242016-12-03 16:45:01 +01005584 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005585 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005586 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005587 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005588 int err;
5589
Ido Schimmel30572242016-12-03 16:45:01 +01005590 /* Protect internal structures from changes */
5591 rtnl_lock();
5592 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005593 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005594 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005595 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005596 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005597 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5598 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005599 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005600 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005601 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005602 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005603 break;
5604 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005605 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5606 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005607 break;
David Ahern1f279232017-10-27 17:37:14 -07005608 case FIB_EVENT_RULE_ADD:
		/* If we get here, a rule was added that we do not support, so
		 * just abort FIB offloading and let the kernel take over.
		 */
5612 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005613 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005614 case FIB_EVENT_NH_ADD: /* fall through */
5615 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005616 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5617 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005618 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5619 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005620 }
Ido Schimmel30572242016-12-03 16:45:01 +01005621 rtnl_unlock();
5622 kfree(fib_work);
5623}
5624
Ido Schimmel66a57632017-08-03 13:28:26 +02005625static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5626{
Ido Schimmel583419f2017-08-03 13:28:27 +02005627 struct mlxsw_sp_fib_event_work *fib_work =
5628 container_of(work, struct mlxsw_sp_fib_event_work, work);
5629 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005630 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005631 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005632
5633 rtnl_lock();
5634 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005635 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005636 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005637 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005638 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005639 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005640 if (err)
5641 mlxsw_sp_router_fib_abort(mlxsw_sp);
5642 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5643 break;
5644 case FIB_EVENT_ENTRY_DEL:
5645 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5646 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5647 break;
David Ahern1f279232017-10-27 17:37:14 -07005648 case FIB_EVENT_RULE_ADD:
		/* If we get here, a rule was added that we do not support, so
		 * just abort FIB offloading and let the kernel take over.
		 */
5652 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005653 break;
5654 }
5655 rtnl_unlock();
5656 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005657}
5658
Yotam Gigid42b0962017-09-27 08:23:20 +02005659static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5660{
5661 struct mlxsw_sp_fib_event_work *fib_work =
5662 container_of(work, struct mlxsw_sp_fib_event_work, work);
5663 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005664 bool replace;
5665 int err;
5666
5667 rtnl_lock();
5668 switch (fib_work->event) {
5669 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5670 case FIB_EVENT_ENTRY_ADD:
5671 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5672
5673 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5674 replace);
5675 if (err)
5676 mlxsw_sp_router_fib_abort(mlxsw_sp);
5677 ipmr_cache_put(fib_work->men_info.mfc);
5678 break;
5679 case FIB_EVENT_ENTRY_DEL:
5680 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5681 ipmr_cache_put(fib_work->men_info.mfc);
5682 break;
5683 case FIB_EVENT_VIF_ADD:
5684 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5685 &fib_work->ven_info);
5686 if (err)
5687 mlxsw_sp_router_fib_abort(mlxsw_sp);
5688 dev_put(fib_work->ven_info.dev);
5689 break;
5690 case FIB_EVENT_VIF_DEL:
5691 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5692 &fib_work->ven_info);
5693 dev_put(fib_work->ven_info.dev);
5694 break;
David Ahern1f279232017-10-27 17:37:14 -07005695 case FIB_EVENT_RULE_ADD:
		/* If we get here, a rule was added that we do not support, so
		 * just abort FIB offloading and let the kernel take over.
		 */
5699 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005700 break;
5701 }
5702 rtnl_unlock();
5703 kfree(fib_work);
5704}
5705
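/* Copy the IPv4 notifier info into the work item and take a reference on the
 * fib_info (or the nexthop's parent fib_info), so that it is not freed
 * before the work item is processed.
 */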
Ido Schimmel66a57632017-08-03 13:28:26 +02005706static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5707 struct fib_notifier_info *info)
5708{
David Ahern3c75f9b2017-10-18 15:01:38 -07005709 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005710 struct fib_nh_notifier_info *fnh_info;
5711
Ido Schimmel66a57632017-08-03 13:28:26 +02005712 switch (fib_work->event) {
5713 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5714 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5715 case FIB_EVENT_ENTRY_ADD: /* fall through */
5716 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005717 fen_info = container_of(info, struct fib_entry_notifier_info,
5718 info);
5719 fib_work->fen_info = *fen_info;
		/* Take a reference on the fib_info to prevent it from being
		 * freed while the work is queued. It is released once the work
		 * item has been processed.
		 */
5723 fib_info_hold(fib_work->fen_info.fi);
5724 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005725 case FIB_EVENT_NH_ADD: /* fall through */
5726 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005727 fnh_info = container_of(info, struct fib_nh_notifier_info,
5728 info);
5729 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005730 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5731 break;
5732 }
5733}
5734
5735static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5736 struct fib_notifier_info *info)
5737{
David Ahern3c75f9b2017-10-18 15:01:38 -07005738 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005739
Ido Schimmel583419f2017-08-03 13:28:27 +02005740 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005741 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005742 case FIB_EVENT_ENTRY_ADD: /* fall through */
5743 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005744 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5745 info);
5746 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005747 rt6_hold(fib_work->fen6_info.rt);
5748 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005749 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005750}
5751
Yotam Gigid42b0962017-09-27 08:23:20 +02005752static void
5753mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5754 struct fib_notifier_info *info)
5755{
5756 switch (fib_work->event) {
5757 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5758 case FIB_EVENT_ENTRY_ADD: /* fall through */
5759 case FIB_EVENT_ENTRY_DEL:
5760 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5761 ipmr_cache_hold(fib_work->men_info.mfc);
5762 break;
5763 case FIB_EVENT_VIF_ADD: /* fall through */
5764 case FIB_EVENT_VIF_DEL:
5765 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5766 dev_hold(fib_work->ven_info.dev);
5767 break;
David Ahern1f279232017-10-27 17:37:14 -07005768 }
5769}
5770
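/* FIB rules are not offloaded. Only the default rules and l3mdev (VRF) rules
 * are acceptable; adding any other rule makes the caller queue a work item
 * that aborts FIB offloading.
 */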
5771static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5772 struct fib_notifier_info *info,
5773 struct mlxsw_sp *mlxsw_sp)
5774{
5775 struct netlink_ext_ack *extack = info->extack;
5776 struct fib_rule_notifier_info *fr_info;
5777 struct fib_rule *rule;
5778 int err = 0;
5779
5780 /* nothing to do at the moment */
5781 if (event == FIB_EVENT_RULE_DEL)
5782 return 0;
5783
5784 if (mlxsw_sp->router->aborted)
5785 return 0;
5786
5787 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5788 rule = fr_info->rule;
5789
5790 switch (info->family) {
5791 case AF_INET:
5792 if (!fib4_rule_default(rule) && !rule->l3mdev)
5793 err = -1;
5794 break;
5795 case AF_INET6:
5796 if (!fib6_rule_default(rule) && !rule->l3mdev)
5797 err = -1;
5798 break;
5799 case RTNL_FAMILY_IPMR:
5800 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5801 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005802 break;
5803 }
David Ahern1f279232017-10-27 17:37:14 -07005804
5805 if (err < 0)
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01005806 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
David Ahern1f279232017-10-27 17:37:14 -07005807
5808 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005809}
5810
Ido Schimmel30572242016-12-03 16:45:01 +01005811/* Called with rcu_read_lock() */
5812static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5813 unsigned long event, void *ptr)
5814{
Ido Schimmel30572242016-12-03 16:45:01 +01005815 struct mlxsw_sp_fib_event_work *fib_work;
5816 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005817 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005818 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005819
Ido Schimmel8e29f972017-09-15 15:31:07 +02005820 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005821 (info->family != AF_INET && info->family != AF_INET6 &&
5822 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005823 return NOTIFY_DONE;
5824
David Ahern1f279232017-10-27 17:37:14 -07005825 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5826
5827 switch (event) {
5828 case FIB_EVENT_RULE_ADD: /* fall through */
5829 case FIB_EVENT_RULE_DEL:
5830 err = mlxsw_sp_router_fib_rule_event(event, info,
5831 router->mlxsw_sp);
5832 if (!err)
5833 return NOTIFY_DONE;
5834 }
5835
Ido Schimmel30572242016-12-03 16:45:01 +01005836 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5837 if (WARN_ON(!fib_work))
5838 return NOTIFY_BAD;
5839
Ido Schimmel7e39d112017-05-16 19:38:28 +02005840 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005841 fib_work->event = event;
5842
Ido Schimmel66a57632017-08-03 13:28:26 +02005843 switch (info->family) {
5844 case AF_INET:
5845 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5846 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005847 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005848 case AF_INET6:
5849 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5850 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005851 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005852 case RTNL_FAMILY_IPMR:
5853 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5854 mlxsw_sp_router_fibmr_event(fib_work, info);
5855 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005856 }
5857
Ido Schimmela0e47612017-02-06 16:20:10 +01005858 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005859
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005860 return NOTIFY_DONE;
5861}
5862
Ido Schimmel4724ba562017-03-10 08:53:39 +01005863static struct mlxsw_sp_rif *
5864mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5865 const struct net_device *dev)
5866{
5867 int i;
5868
5869 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005870 if (mlxsw_sp->router->rifs[i] &&
5871 mlxsw_sp->router->rifs[i]->dev == dev)
5872 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005873
5874 return NULL;
5875}
5876
5877static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5878{
5879 char ritr_pl[MLXSW_REG_RITR_LEN];
5880 int err;
5881
5882 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5883 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5884 if (WARN_ON_ONCE(err))
5885 return err;
5886
5887 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5888 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5889}
5890
5891static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005892 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005893{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005894 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5895 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5896 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005897}
5898
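/* A RIF should be configured when the first IP address is added to a netdev
 * and removed once both its IPv4 and IPv6 address lists are empty, unless
 * the netdev is an L3 slave, in which case the RIF is managed through the
 * VRF enslavement events.
 */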
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005899static bool
5900mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5901 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005902{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005903 struct inet6_dev *inet6_dev;
5904 bool addr_list_empty = true;
5905 struct in_device *idev;
5906
Ido Schimmel4724ba562017-03-10 08:53:39 +01005907 switch (event) {
5908 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005909 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005910 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005911 idev = __in_dev_get_rtnl(dev);
5912 if (idev && idev->ifa_list)
5913 addr_list_empty = false;
5914
5915 inet6_dev = __in6_dev_get(dev);
5916 if (addr_list_empty && inet6_dev &&
5917 !list_empty(&inet6_dev->addr_list))
5918 addr_list_empty = false;
5919
5920 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005921 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005922 return true;
5923 /* It is possible we already removed the RIF ourselves
5924 * if it was assigned to a netdev that is now a bridge
5925 * or LAG slave.
5926 */
5927 return false;
5928 }
5929
5930 return false;
5931}
5932
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005933static enum mlxsw_sp_rif_type
5934mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5935 const struct net_device *dev)
5936{
5937 enum mlxsw_sp_fid_type type;
5938
Petr Machata6ddb7422017-09-02 23:49:19 +02005939 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5940 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5941
5942 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005943 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5944 type = MLXSW_SP_FID_TYPE_8021Q;
5945 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5946 type = MLXSW_SP_FID_TYPE_8021Q;
5947 else if (netif_is_bridge_master(dev))
5948 type = MLXSW_SP_FID_TYPE_8021D;
5949 else
5950 type = MLXSW_SP_FID_TYPE_RFID;
5951
5952 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5953}
5954
Ido Schimmelde5ed992017-06-04 16:53:40 +02005955static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005956{
5957 int i;
5958
Ido Schimmelde5ed992017-06-04 16:53:40 +02005959 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5960 if (!mlxsw_sp->router->rifs[i]) {
5961 *p_rif_index = i;
5962 return 0;
5963 }
5964 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005965
Ido Schimmelde5ed992017-06-04 16:53:40 +02005966 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005967}
5968
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005969static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5970 u16 vr_id,
5971 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005972{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005973 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005974
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005975 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005976 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005977 return NULL;
5978
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005979 INIT_LIST_HEAD(&rif->nexthop_list);
5980 INIT_LIST_HEAD(&rif->neigh_list);
5981 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5982 rif->mtu = l3_dev->mtu;
5983 rif->vr_id = vr_id;
5984 rif->dev = l3_dev;
5985 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005986
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005987 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005988}
5989
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005990struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5991 u16 rif_index)
5992{
5993 return mlxsw_sp->router->rifs[rif_index];
5994}
5995
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005996u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5997{
5998 return rif->rif_index;
5999}
6000
Petr Machata92107cf2017-09-02 23:49:28 +02006001u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6002{
6003 return lb_rif->common.rif_index;
6004}
6005
6006u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6007{
6008 return lb_rif->ul_vr_id;
6009}
6010
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006011int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6012{
6013 return rif->dev->ifindex;
6014}
6015
Yotam Gigi91e4d592017-09-19 10:00:19 +02006016const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6017{
6018 return rif->dev;
6019}
6020
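/* Create a RIF for an L3 netdev: derive the RIF type (and its ops) from the
 * netdev, bind the RIF to a virtual router, allocate a free RIF index, get
 * the backing FID where applicable and program the device through the
 * type-specific configure() callback.
 */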
Ido Schimmel4724ba562017-03-10 08:53:39 +01006021static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006022mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006023 const struct mlxsw_sp_rif_params *params,
6024 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006025{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006026 u32 tb_id = l3mdev_fib_table(params->dev);
6027 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02006028 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006029 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006030 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006031 struct mlxsw_sp_vr *vr;
6032 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006033 int err;
6034
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006035 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6036 ops = mlxsw_sp->router->rif_ops_arr[type];
6037
David Ahernf8fa9b42017-10-18 09:56:56 -07006038 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006039 if (IS_ERR(vr))
6040 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006041 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006042
Ido Schimmelde5ed992017-06-04 16:53:40 +02006043 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006044 if (err) {
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01006045 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006046 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006047 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006048
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006049 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006050 if (!rif) {
6051 err = -ENOMEM;
6052 goto err_rif_alloc;
6053 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006054 rif->mlxsw_sp = mlxsw_sp;
6055 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006056
Petr Machata010cadf2017-09-02 23:49:18 +02006057 if (ops->fid_get) {
6058 fid = ops->fid_get(rif);
6059 if (IS_ERR(fid)) {
6060 err = PTR_ERR(fid);
6061 goto err_fid_get;
6062 }
6063 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006064 }
6065
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006066 if (ops->setup)
6067 ops->setup(rif, params);
6068
6069 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006070 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006071 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006072
Yotam Gigid42b0962017-09-27 08:23:20 +02006073 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
6074 if (err)
6075 goto err_mr_rif_add;
6076
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006077 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006078 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006079
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006080 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006081
Yotam Gigid42b0962017-09-27 08:23:20 +02006082err_mr_rif_add:
6083 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006084err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006085 if (fid)
6086 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006087err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006088 kfree(rif);
6089err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006090err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006091 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006092 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006093 return ERR_PTR(err);
6094}
6095
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006096void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006097{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006098 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6099 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006100 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006101 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006102
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006103 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006104 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006105
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006106 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006107 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006108 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006109 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006110 if (fid)
6111 /* Loopback RIFs are not associated with a FID. */
6112 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006113 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006114 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006115 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006116}
6117
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006118static void
6119mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6120 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6121{
6122 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6123
6124 params->vid = mlxsw_sp_port_vlan->vid;
6125 params->lag = mlxsw_sp_port->lagged;
6126 if (params->lag)
6127 params->lag_id = mlxsw_sp_port->lag_id;
6128 else
6129 params->system_port = mlxsw_sp_port->local_port;
6130}
6131
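/* Join a {port, VLAN} to the router: create a sub-port RIF for the L3 netdev
 * if one does not exist yet, map the port and VID to the RIF's rFID and move
 * the VLAN to the forwarding state with learning disabled.
 */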
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006132static int
Ido Schimmela1107482017-05-26 08:37:39 +02006133mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006134 struct net_device *l3_dev,
6135 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006136{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006137 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006138 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006139 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006140 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006141 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006142 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006143
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006144 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006145 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006146 struct mlxsw_sp_rif_params params = {
6147 .dev = l3_dev,
6148 };
6149
6150 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006151 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006152 if (IS_ERR(rif))
6153 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006154 }
6155
Ido Schimmela1107482017-05-26 08:37:39 +02006156 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006157 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006158 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6159 if (err)
6160 goto err_fid_port_vid_map;
6161
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006162 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006163 if (err)
6164 goto err_port_vid_learning_set;
6165
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006166 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006167 BR_STATE_FORWARDING);
6168 if (err)
6169 goto err_port_vid_stp_set;
6170
Ido Schimmela1107482017-05-26 08:37:39 +02006171 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006172
Ido Schimmel4724ba562017-03-10 08:53:39 +01006173 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006174
6175err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006176 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006177err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006178 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6179err_fid_port_vid_map:
6180 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006181 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006182}
6183
Ido Schimmela1107482017-05-26 08:37:39 +02006184void
6185mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006186{
Ido Schimmelce95e152017-05-26 08:37:27 +02006187 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006188 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006189 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006190
Ido Schimmela1107482017-05-26 08:37:39 +02006191 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6192 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006193
Ido Schimmela1107482017-05-26 08:37:39 +02006194 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006195 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6196 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006197 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If the router port holds the last reference on the rFID, then the
	 * associated sub-port RIF will be destroyed.
	 */
6201 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006202}
6203
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006204static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6205 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006206 unsigned long event, u16 vid,
6207 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006208{
6209 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006210 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006211
Ido Schimmelce95e152017-05-26 08:37:27 +02006212 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006213 if (WARN_ON(!mlxsw_sp_port_vlan))
6214 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006215
6216 switch (event) {
6217 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006218 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006219 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006220 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006221 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006222 break;
6223 }
6224
6225 return 0;
6226}
6227
6228static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006229 unsigned long event,
6230 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006231{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006232 if (netif_is_bridge_port(port_dev) ||
6233 netif_is_lag_port(port_dev) ||
6234 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006235 return 0;
6236
David Ahernf8fa9b42017-10-18 09:56:56 -07006237 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6238 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006239}
6240
6241static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6242 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006243 unsigned long event, u16 vid,
6244 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006245{
6246 struct net_device *port_dev;
6247 struct list_head *iter;
6248 int err;
6249
6250 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6251 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006252 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6253 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006254 event, vid,
6255 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006256 if (err)
6257 return err;
6258 }
6259 }
6260
6261 return 0;
6262}
6263
6264static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006265 unsigned long event,
6266 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006267{
6268 if (netif_is_bridge_port(lag_dev))
6269 return 0;
6270
David Ahernf8fa9b42017-10-18 09:56:56 -07006271 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6272 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006273}
6274
Ido Schimmel4724ba562017-03-10 08:53:39 +01006275static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006276 unsigned long event,
6277 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006278{
6279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006280 struct mlxsw_sp_rif_params params = {
6281 .dev = l3_dev,
6282 };
Ido Schimmela1107482017-05-26 08:37:39 +02006283 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006284
6285 switch (event) {
6286 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006287 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006288 if (IS_ERR(rif))
6289 return PTR_ERR(rif);
6290 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006291 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006292 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006293 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006294 break;
6295 }
6296
6297 return 0;
6298}
6299
6300static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006301 unsigned long event,
6302 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006303{
6304 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006305 u16 vid = vlan_dev_vlan_id(vlan_dev);
6306
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006307 if (netif_is_bridge_port(vlan_dev))
6308 return 0;
6309
Ido Schimmel4724ba562017-03-10 08:53:39 +01006310 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006311 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006312 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006313 else if (netif_is_lag_master(real_dev))
6314 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006315 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006316 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006317 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006318
6319 return 0;
6320}
6321
Ido Schimmelb1e45522017-04-30 19:47:14 +03006322static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006323 unsigned long event,
6324 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006325{
6326 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006327 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006328 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006329 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006330 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006331 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006332 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006333 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006334 else
6335 return 0;
6336}
6337
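/* IPv4 address events are handled synchronously, under RTNL. NETDEV_UP is
 * handled by the address validator notifier instead, so that a failure to
 * create the RIF can be reported before the address is installed.
 */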
Ido Schimmel4724ba562017-03-10 08:53:39 +01006338int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6339 unsigned long event, void *ptr)
6340{
6341 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6342 struct net_device *dev = ifa->ifa_dev->dev;
6343 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006344 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006345 int err = 0;
6346
David Ahern89d5dd22017-10-18 09:56:55 -07006347 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6348 if (event == NETDEV_UP)
6349 goto out;
6350
6351 mlxsw_sp = mlxsw_sp_lower_get(dev);
6352 if (!mlxsw_sp)
6353 goto out;
6354
6355 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6356 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6357 goto out;
6358
David Ahernf8fa9b42017-10-18 09:56:56 -07006359 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006360out:
6361 return notifier_from_errno(err);
6362}
6363
6364int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6365 unsigned long event, void *ptr)
6366{
6367 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6368 struct net_device *dev = ivi->ivi_dev->dev;
6369 struct mlxsw_sp *mlxsw_sp;
6370 struct mlxsw_sp_rif *rif;
6371 int err = 0;
6372
Ido Schimmel4724ba562017-03-10 08:53:39 +01006373 mlxsw_sp = mlxsw_sp_lower_get(dev);
6374 if (!mlxsw_sp)
6375 goto out;
6376
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006377 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006378 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006379 goto out;
6380
David Ahernf8fa9b42017-10-18 09:56:56 -07006381 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006382out:
6383 return notifier_from_errno(err);
6384}
6385
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006386struct mlxsw_sp_inet6addr_event_work {
6387 struct work_struct work;
6388 struct net_device *dev;
6389 unsigned long event;
6390};
6391
6392static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6393{
6394 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6395 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6396 struct net_device *dev = inet6addr_work->dev;
6397 unsigned long event = inet6addr_work->event;
6398 struct mlxsw_sp *mlxsw_sp;
6399 struct mlxsw_sp_rif *rif;
6400
6401 rtnl_lock();
6402 mlxsw_sp = mlxsw_sp_lower_get(dev);
6403 if (!mlxsw_sp)
6404 goto out;
6405
6406 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6407 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6408 goto out;
6409
David Ahernf8fa9b42017-10-18 09:56:56 -07006410 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006411out:
6412 rtnl_unlock();
6413 dev_put(dev);
6414 kfree(inet6addr_work);
6415}
6416
6417/* Called with rcu_read_lock() */
6418int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6419 unsigned long event, void *ptr)
6420{
6421 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6422 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6423 struct net_device *dev = if6->idev->dev;
6424
David Ahern89d5dd22017-10-18 09:56:55 -07006425 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6426 if (event == NETDEV_UP)
6427 return NOTIFY_DONE;
6428
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006429 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6430 return NOTIFY_DONE;
6431
6432 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6433 if (!inet6addr_work)
6434 return NOTIFY_BAD;
6435
6436 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6437 inet6addr_work->dev = dev;
6438 inet6addr_work->event = event;
6439 dev_hold(dev);
6440 mlxsw_core_schedule_work(&inet6addr_work->work);
6441
6442 return NOTIFY_DONE;
6443}
6444
David Ahern89d5dd22017-10-18 09:56:55 -07006445int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6446 unsigned long event, void *ptr)
6447{
6448 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6449 struct net_device *dev = i6vi->i6vi_dev->dev;
6450 struct mlxsw_sp *mlxsw_sp;
6451 struct mlxsw_sp_rif *rif;
6452 int err = 0;
6453
6454 mlxsw_sp = mlxsw_sp_lower_get(dev);
6455 if (!mlxsw_sp)
6456 goto out;
6457
6458 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6459 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6460 goto out;
6461
David Ahernf8fa9b42017-10-18 09:56:56 -07006462 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006463out:
6464 return notifier_from_errno(err);
6465}
6466
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006467static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006468 const char *mac, int mtu)
6469{
6470 char ritr_pl[MLXSW_REG_RITR_LEN];
6471 int err;
6472
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006473 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006474 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6475 if (err)
6476 return err;
6477
6478 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6479 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6480 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6481 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6482}
6483
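/* Handle a MAC address or MTU change on a netdev backed by a RIF: remove the
 * FDB entry directing the old MAC to the router, edit the RIF with the new
 * attributes and install an FDB entry for the new MAC. The multicast routing
 * table is updated separately when the MTU changes.
 */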
6484int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6485{
6486 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006487 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006488 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006489 int err;
6490
6491 mlxsw_sp = mlxsw_sp_lower_get(dev);
6492 if (!mlxsw_sp)
6493 return 0;
6494
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006495 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6496 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006497 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006498 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006499
Ido Schimmela1107482017-05-26 08:37:39 +02006500 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006501 if (err)
6502 return err;
6503
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006504 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6505 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006506 if (err)
6507 goto err_rif_edit;
6508
Ido Schimmela1107482017-05-26 08:37:39 +02006509 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006510 if (err)
6511 goto err_rif_fdb_op;
6512
Yotam Gigifd890fe2017-09-27 08:23:21 +02006513 if (rif->mtu != dev->mtu) {
6514 struct mlxsw_sp_vr *vr;
6515
		/* The RIF is relevant only to its mr_table instance because,
		 * unlike in unicast routing, a RIF cannot be shared between
		 * several multicast routing tables.
		 */
6520 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6521 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6522 }
6523
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006524 ether_addr_copy(rif->addr, dev->dev_addr);
6525 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006526
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006527 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006528
6529 return 0;
6530
6531err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006532 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006533err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006534 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006535 return err;
6536}
6537
Ido Schimmelb1e45522017-04-30 19:47:14 +03006538static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006539 struct net_device *l3_dev,
6540 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006541{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006542 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006543
	/* If the netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006547 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6548 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006549 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006550
David Ahernf8fa9b42017-10-18 09:56:56 -07006551 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006552}
6553
Ido Schimmelb1e45522017-04-30 19:47:14 +03006554static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6555 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006556{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006557 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006558
Ido Schimmelb1e45522017-04-30 19:47:14 +03006559 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6560 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006561 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006562 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006563}
6564
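/* Enslaving a netdev to a VRF is modelled by destroying the netdev's
 * existing RIF, if any, and creating a new one using the VRF's table.
 * Releasing the netdev simply destroys the RIF.
 */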
Ido Schimmelb1e45522017-04-30 19:47:14 +03006565int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6566 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006567{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006568 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6569 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006570
Ido Schimmelb1e45522017-04-30 19:47:14 +03006571 if (!mlxsw_sp)
6572 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006573
Ido Schimmelb1e45522017-04-30 19:47:14 +03006574 switch (event) {
6575 case NETDEV_PRECHANGEUPPER:
6576 return 0;
6577 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006578 if (info->linking) {
6579 struct netlink_ext_ack *extack;
6580
6581 extack = netdev_notifier_info_to_extack(&info->info);
6582 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6583 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006584 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006585 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006586 break;
6587 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006588
Ido Schimmelb1e45522017-04-30 19:47:14 +03006589 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006590}
6591
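/* Sub-port RIFs back {port, VLAN} and {LAG, VLAN} uppers. The RITR register
 * is packed with the sub-port attributes (system port or LAG ID and VID) and
 * the RIF is associated with an rFID.
 */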
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006592static struct mlxsw_sp_rif_subport *
6593mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006594{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006595 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006596}
6597
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006598static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6599 const struct mlxsw_sp_rif_params *params)
6600{
6601 struct mlxsw_sp_rif_subport *rif_subport;
6602
6603 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6604 rif_subport->vid = params->vid;
6605 rif_subport->lag = params->lag;
6606 if (params->lag)
6607 rif_subport->lag_id = params->lag_id;
6608 else
6609 rif_subport->system_port = params->system_port;
6610}
6611
6612static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6613{
6614 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6615 struct mlxsw_sp_rif_subport *rif_subport;
6616 char ritr_pl[MLXSW_REG_RITR_LEN];
6617
6618 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6619 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006620 rif->rif_index, rif->vr_id, rif->dev->mtu);
6621 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006622 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6623 rif_subport->lag ? rif_subport->lag_id :
6624 rif_subport->system_port,
6625 rif_subport->vid);
6626
6627 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6628}
6629
6630static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6631{
Petr Machata010cadf2017-09-02 23:49:18 +02006632 int err;
6633
6634 err = mlxsw_sp_rif_subport_op(rif, true);
6635 if (err)
6636 return err;
6637
6638 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6639 mlxsw_sp_fid_index(rif->fid), true);
6640 if (err)
6641 goto err_rif_fdb_op;
6642
6643 mlxsw_sp_fid_rif_set(rif->fid, rif);
6644 return 0;
6645
6646err_rif_fdb_op:
6647 mlxsw_sp_rif_subport_op(rif, false);
6648 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006649}
6650
6651static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6652{
Petr Machata010cadf2017-09-02 23:49:18 +02006653 struct mlxsw_sp_fid *fid = rif->fid;
6654
6655 mlxsw_sp_fid_rif_set(fid, NULL);
6656 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6657 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006658 mlxsw_sp_rif_subport_op(rif, false);
6659}
6660
6661static struct mlxsw_sp_fid *
6662mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6663{
6664 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6665}
6666
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

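/* The "router port" is a dedicated local port, one past the highest port
 * supported by the device, used as the flood destination towards the router
 * in the FID flood tables.
 */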
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

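/* A VLAN RIF sits on top of an 802.1Q FID. In addition to the RITR entry
 * and the FDB entry for the router MAC, configure() enables MC and BC
 * flooding of the FID towards the router port.
 */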
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};

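/* A FID RIF follows the same sequence as the VLAN RIF above, but the RITR
 * entry is of the FID interface type and is keyed by the 802.1D FID index
 * rather than by a VID.
 */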
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};

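/* IPIP loopback RIFs back offloaded IP-in-IP tunnels. setup() copies the
 * loopback configuration (tunnel type, underlay protocol, underlay source
 * address and key) from the RIF parameters into the RIF instance.
 */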
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

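/* Write the loopback RITR entry for the RIF in the given underlay virtual
 * router. Only an IPv4 underlay is supported; an IPv6 underlay is rejected
 * with -EAFNOSUPPORT.
 */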
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
			struct mlxsw_sp_vr *ul_vr, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr->id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp_rif_ipip_lb_deconfigure,
};

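/* RIF operations for each RIF type, indexed by the MLXSW_SP_RIF_TYPE_*
 * constants.
 */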
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

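/* TIGCR holds device-global configuration for IP-in-IP tunneling. It is
 * programmed once as part of IPIP initialization, before any tunnels are
 * offloaded.
 */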
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

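/* ECMP hash configuration: the hash is seeded with random bytes and the
 * relevant header fields are enabled via the RECR2 register. For IPv4,
 * layer 4 fields are only enabled when the kernel's
 * fib_multipath_hash_policy sysctl requests L4 hashing.
 */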
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW determines switch priority based on the DSCP bits, whereas the
	 * kernel still does so based on the full ToS byte. Since the bit
	 * layouts differ, translate each DSCP value to the priority the
	 * kernel would derive from the corresponding ToS, skipping the two
	 * least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

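/* Enable the router in the device via the RGCR register, for both IPv4 and
 * IPv6, and cap the number of router interfaces at the MAX_RIFS resource.
 */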
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

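/* Top-level router initialization: sub-blocks are brought up in dependency
 * order (RIFs, IPIP, nexthop tables, LPM trees, multicast routing, virtual
 * routers, neighbour handling), and the netevent and FIB notifiers are
 * registered last, once the router is ready to handle events. Errors unwind
 * in reverse order.
 */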
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}