/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

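/* Per-ASIC router state: router interfaces (RIFs), virtual routers (VRs),
 * neighbour and nexthop tracking, LPM tree bookkeeping, offloaded IP-in-IP
 * tunnels and the notifier blocks through which FIB and netevent updates
 * are received.
 */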
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
};

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

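/* RIF counter helpers: allocate a counter from the RIF sub-pool, bind it to
 * a router interface through the RITR register and read it back via RICNT.
 * Only the egress counter is allocated by default, and only when counters
 * are enabled for the erif dpipe table.
 */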
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

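/* A prefix usage is a bitmap with one bit per possible prefix length
 * (0 through 128, hence the in6_addr based count). It records which prefix
 * lengths a FIB uses, so that an LPM tree with a matching structure can be
 * picked or created.
 */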
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

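/* An LPM tree describes the layout of one hardware longest-prefix-match tree
 * (programmed through RALTA/RALST). Trees are shared: FIBs with the same
 * protocol and prefix usage reference the same tree, tracked by ref_count.
 */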
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

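/* Look up an existing tree with the same protocol and prefix usage and take
 * a reference on it, or create a new one if none matches.
 */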
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

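/* Virtual routers mirror kernel FIB tables (with local, default and main
 * squashed into one). A VR is considered in use as long as any of its
 * unicast FIBs or multicast route tables is instantiated.
 */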
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
					const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

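/* Create a VR for the given kernel table ID: an unused VR is claimed and
 * populated with IPv4 and IPv6 unicast FIBs plus the corresponding
 * multicast route tables.
 */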
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

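/* Rebind every VR that is currently bound to the protocol's default LPM tree
 * to new_tree, carry over the per-prefix reference counts and drop the
 * reference on the old tree. On failure, already rebound VRs are rolled back.
 */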
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

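/* IP-in-IP offload. Each offloaded tunnel netdevice (the overlay device,
 * "ol_dev") is represented by an mlxsw_sp_ipip_entry, which owns a loopback
 * RIF for the tunnel and, when a matching local route to the tunnel source
 * exists in the underlay ("ul") VR, a decap FIB entry that terminates the
 * tunnel in hardware.
 */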
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

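/* Turning a local route into a decap entry requires an adjacency (KVDL)
 * index for the tunnel termination; allocate it and cross-link the FIB
 * entry with its IPIP entry.
 */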
Petr Machata4607f6d2017-09-02 23:49:25 +02001101static int
1102mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1103 struct mlxsw_sp_fib_entry *fib_entry,
1104 struct mlxsw_sp_ipip_entry *ipip_entry)
1105{
1106 u32 tunnel_index;
1107 int err;
1108
Jiri Pirko4b6b1862018-07-08 23:51:17 +03001109 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1110 1, &tunnel_index);
Petr Machata4607f6d2017-09-02 23:49:25 +02001111 if (err)
1112 return err;
1113
1114 ipip_entry->decap_fib_entry = fib_entry;
1115 fib_entry->decap.ipip_entry = ipip_entry;
1116 fib_entry->decap.tunnel_index = tunnel_index;
1117 return 0;
1118}
1119
1120static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1121 struct mlxsw_sp_fib_entry *fib_entry)
1122{
1123 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1124 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1125 fib_entry->decap.ipip_entry = NULL;
Jiri Pirko4b6b1862018-07-08 23:51:17 +03001126 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
Jiri Pirko0304c002018-07-08 23:51:18 +03001127 1, fib_entry->decap.tunnel_index);
Petr Machata4607f6d2017-09-02 23:49:25 +02001128}
1129
Petr Machata1cc38fb2017-09-02 23:49:26 +02001130static struct mlxsw_sp_fib_node *
1131mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1132 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001133static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1134 struct mlxsw_sp_fib_entry *fib_entry);
1135
1136static void
1137mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1138 struct mlxsw_sp_ipip_entry *ipip_entry)
1139{
1140 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1141
1142 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1143 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1144
1145 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1146}
1147
Petr Machata1cc38fb2017-09-02 23:49:26 +02001148static void
1149mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1150 struct mlxsw_sp_ipip_entry *ipip_entry,
1151 struct mlxsw_sp_fib_entry *decap_fib_entry)
1152{
1153 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1154 ipip_entry))
1155 return;
1156 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1157
1158 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1159 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1160}
1161
1162/* Given an IPIP entry, find the corresponding decap route. */
1163static struct mlxsw_sp_fib_entry *
1164mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1165 struct mlxsw_sp_ipip_entry *ipip_entry)
1166{
1167 static struct mlxsw_sp_fib_node *fib_node;
1168 const struct mlxsw_sp_ipip_ops *ipip_ops;
1169 struct mlxsw_sp_fib_entry *fib_entry;
1170 unsigned char saddr_prefix_len;
1171 union mlxsw_sp_l3addr saddr;
1172 struct mlxsw_sp_fib *ul_fib;
1173 struct mlxsw_sp_vr *ul_vr;
1174 const void *saddrp;
1175 size_t saddr_len;
1176 u32 ul_tb_id;
1177 u32 saddr4;
1178
1179 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1180
1181 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1182 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1183 if (!ul_vr)
1184 return NULL;
1185
1186 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1187 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1188 ipip_entry->ol_dev);
1189
1190 switch (ipip_ops->ul_proto) {
1191 case MLXSW_SP_L3_PROTO_IPV4:
1192 saddr4 = be32_to_cpu(saddr.addr4);
1193 saddrp = &saddr4;
1194 saddr_len = 4;
1195 saddr_prefix_len = 32;
1196 break;
1197 case MLXSW_SP_L3_PROTO_IPV6:
1198 WARN_ON(1);
1199 return NULL;
1200 }
1201
1202 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1203 saddr_prefix_len);
1204 if (!fib_node || list_empty(&fib_node->entry_list))
1205 return NULL;
1206
1207 fib_entry = list_first_entry(&fib_node->entry_list,
1208 struct mlxsw_sp_fib_entry, list);
1209 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1210 return NULL;
1211
1212 return fib_entry;
1213}
1214
Petr Machata1012b9a2017-09-02 23:49:23 +02001215static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001216mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1217 enum mlxsw_sp_ipip_type ipipt,
1218 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001219{
Petr Machata1012b9a2017-09-02 23:49:23 +02001220 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02001221
1222 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1223 if (IS_ERR(ipip_entry))
1224 return ipip_entry;
1225
1226 list_add_tail(&ipip_entry->ipip_list_node,
1227 &mlxsw_sp->router->ipip_list);
1228
Petr Machata1012b9a2017-09-02 23:49:23 +02001229 return ipip_entry;
1230}
1231
1232static void
Petr Machata4cccb732017-10-16 16:26:39 +02001233mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1234 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001235{
Petr Machata4cccb732017-10-16 16:26:39 +02001236 list_del(&ipip_entry->ipip_list_node);
1237 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
Petr Machata1012b9a2017-09-02 23:49:23 +02001238}
1239
Petr Machata4607f6d2017-09-02 23:49:25 +02001240static bool
1241mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1242 const struct net_device *ul_dev,
1243 enum mlxsw_sp_l3proto ul_proto,
1244 union mlxsw_sp_l3addr ul_dip,
1245 struct mlxsw_sp_ipip_entry *ipip_entry)
1246{
1247 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1248 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1249 struct net_device *ipip_ul_dev;
1250
1251 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1252 return false;
1253
1254 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1255 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1256 ul_tb_id, ipip_entry) &&
1257 (!ipip_ul_dev || ipip_ul_dev == ul_dev);
1258}
1259
1260/* Given decap parameters, find the corresponding IPIP entry. */
1261static struct mlxsw_sp_ipip_entry *
1262mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1263 const struct net_device *ul_dev,
1264 enum mlxsw_sp_l3proto ul_proto,
1265 union mlxsw_sp_l3addr ul_dip)
1266{
1267 struct mlxsw_sp_ipip_entry *ipip_entry;
1268
1269 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1270 ipip_list_node)
1271 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1272 ul_proto, ul_dip,
1273 ipip_entry))
1274 return ipip_entry;
1275
1276 return NULL;
1277}
1278
Petr Machata6698c162017-10-16 16:26:36 +02001279static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1280 const struct net_device *dev,
1281 enum mlxsw_sp_ipip_type *p_type)
1282{
1283 struct mlxsw_sp_router *router = mlxsw_sp->router;
1284 const struct mlxsw_sp_ipip_ops *ipip_ops;
1285 enum mlxsw_sp_ipip_type ipipt;
1286
1287 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1288 ipip_ops = router->ipip_ops_arr[ipipt];
1289 if (dev->type == ipip_ops->dev_type) {
1290 if (p_type)
1291 *p_type = ipipt;
1292 return true;
1293 }
1294 }
1295 return false;
1296}
1297
Petr Machata796ec772017-11-03 10:03:29 +01001298bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1299 const struct net_device *dev)
Petr Machata00635872017-10-16 16:26:37 +02001300{
1301 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1302}
1303
1304static struct mlxsw_sp_ipip_entry *
1305mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1306 const struct net_device *ol_dev)
1307{
1308 struct mlxsw_sp_ipip_entry *ipip_entry;
1309
1310 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1311 ipip_list_node)
1312 if (ipip_entry->ol_dev == ol_dev)
1313 return ipip_entry;
1314
1315 return NULL;
1316}
1317
Petr Machata61481f22017-11-03 10:03:41 +01001318static struct mlxsw_sp_ipip_entry *
1319mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1320 const struct net_device *ul_dev,
1321 struct mlxsw_sp_ipip_entry *start)
1322{
1323 struct mlxsw_sp_ipip_entry *ipip_entry;
1324
1325 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1326 ipip_list_node);
1327 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1328 ipip_list_node) {
1329 struct net_device *ipip_ul_dev =
1330 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1331
1332 if (ipip_ul_dev == ul_dev)
1333 return ipip_entry;
1334 }
1335
1336 return NULL;
1337}
1338
1339bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1340 const struct net_device *dev)
1341{
1342 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1343}
1344
Petr Machatacafdb2a2017-11-03 10:03:30 +01001345static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1346 const struct net_device *ol_dev,
1347 enum mlxsw_sp_ipip_type ipipt)
1348{
1349 const struct mlxsw_sp_ipip_ops *ops
1350 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1351
1352 /* For deciding whether decap should be offloaded, we don't care about
1353 * overlay protocol, so ask whether either one is supported.
1354 */
1355 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1356 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1357}
1358
Petr Machata796ec772017-11-03 10:03:29 +01001359static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1360 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001361{
Petr Machata00635872017-10-16 16:26:37 +02001362 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001363 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001364 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001365 union mlxsw_sp_l3addr saddr;
1366 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001367
1368 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001369 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001370 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1371 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1372 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1373 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1374 saddr, ul_tb_id,
1375 NULL)) {
1376 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1377 ol_dev);
1378 if (IS_ERR(ipip_entry))
1379 return PTR_ERR(ipip_entry);
1380 }
Petr Machata00635872017-10-16 16:26:37 +02001381 }
1382
1383 return 0;
1384}
1385
Petr Machata796ec772017-11-03 10:03:29 +01001386static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1387 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001388{
1389 struct mlxsw_sp_ipip_entry *ipip_entry;
1390
1391 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1392 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001393 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001394}
1395
Petr Machata47518ca2017-11-03 10:03:35 +01001396static void
1397mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1398 struct mlxsw_sp_ipip_entry *ipip_entry)
1399{
1400 struct mlxsw_sp_fib_entry *decap_fib_entry;
1401
1402 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1403 if (decap_fib_entry)
1404 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1405 decap_fib_entry);
1406}
1407
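/* Program the loopback RIF backing an IPIP tunnel through the RITR register.
 * Only an IPv4 underlay is currently supported; IPv6 yields -EAFNOSUPPORT.
 */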
Petr Machata22b990582018-03-22 19:53:34 +02001408static int
1409mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
1410 struct mlxsw_sp_vr *ul_vr, bool enable)
1411{
1412 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1413 struct mlxsw_sp_rif *rif = &lb_rif->common;
1414 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1415 char ritr_pl[MLXSW_REG_RITR_LEN];
1416 u32 saddr4;
1417
1418 switch (lb_cf.ul_protocol) {
1419 case MLXSW_SP_L3_PROTO_IPV4:
1420 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1421 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1422 rif->rif_index, rif->vr_id, rif->dev->mtu);
1423 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1424 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1425 ul_vr->id, saddr4, lb_cf.okey);
1426 break;
1427
1428 case MLXSW_SP_L3_PROTO_IPV6:
1429 return -EAFNOSUPPORT;
1430 }
1431
1432 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1433}
1434
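/* Propagate an MTU change of the overlay netdevice by rewriting the loopback
 * RIF of the corresponding IPIP entry, if one exists.
 */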
Petr Machata68c3cd92018-03-22 19:53:35 +02001435static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1436 struct net_device *ol_dev)
1437{
1438 struct mlxsw_sp_ipip_entry *ipip_entry;
1439 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1440 struct mlxsw_sp_vr *ul_vr;
1441 int err = 0;
1442
1443 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1444 if (ipip_entry) {
1445 lb_rif = ipip_entry->ol_lb;
1446 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1447 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1448 if (err)
1449 goto out;
1450 lb_rif->common.mtu = ol_dev->mtu;
1451 }
1452
1453out:
1454 return err;
1455}
1456
Petr Machata6d4de442017-11-03 10:03:34 +01001457static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1458 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001459{
Petr Machata00635872017-10-16 16:26:37 +02001460 struct mlxsw_sp_ipip_entry *ipip_entry;
1461
1462 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001463 if (ipip_entry)
1464 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001465}
1466
Petr Machataa3fe1982017-11-03 10:03:33 +01001467static void
1468mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1469 struct mlxsw_sp_ipip_entry *ipip_entry)
1470{
1471 if (ipip_entry->decap_fib_entry)
1472 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1473}
1474
Petr Machata796ec772017-11-03 10:03:29 +01001475static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1476 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001477{
1478 struct mlxsw_sp_ipip_entry *ipip_entry;
1479
1480 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001481 if (ipip_entry)
1482 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001483}
1484
Petr Machata09dbf622017-11-28 13:17:14 +01001485static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1486 struct mlxsw_sp_rif *old_rif,
1487 struct mlxsw_sp_rif *new_rif);
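/* Replace the loopback RIF of an IPIP entry with a freshly created one. With
 * 'keep_encap', next hops that used the old loopback RIF are migrated to the
 * new one before the old RIF is destroyed.
 */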
Petr Machata65a61212017-11-03 10:03:37 +01001488static int
1489mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1490 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001491 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001492 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001493{
Petr Machata65a61212017-11-03 10:03:37 +01001494 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1495 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001496
Petr Machata65a61212017-11-03 10:03:37 +01001497 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1498 ipip_entry->ipipt,
1499 ipip_entry->ol_dev,
1500 extack);
1501 if (IS_ERR(new_lb_rif))
1502 return PTR_ERR(new_lb_rif);
1503 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001504
Petr Machata09dbf622017-11-28 13:17:14 +01001505 if (keep_encap)
1506 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1507 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001508
Petr Machata65a61212017-11-03 10:03:37 +01001509 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001510
Petr Machata65a61212017-11-03 10:03:37 +01001511 return 0;
1512}
1513
Petr Machata09dbf622017-11-28 13:17:14 +01001514static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1515 struct mlxsw_sp_rif *rif);
1516
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001517/*
 1518 * Update the offload related to an IPIP entry. Decap is always refreshed;
 1519 * in addition, depending on the arguments:
 1520 * @recreate_loopback: recreates the associated loopback RIF.
 1521 * @keep_encap: also migrates the next hops that use the tunnel netdevice.
 1522 *     Only relevant when recreate_loopback is true.
 1523 * @update_nexthops: refreshes the next hops, keeping the current loopback
 1524 *     RIF. Only relevant when recreate_loopback is false.
 1525 */
Petr Machata65a61212017-11-03 10:03:37 +01001526int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1527 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001528 bool recreate_loopback,
1529 bool keep_encap,
1530 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001531 struct netlink_ext_ack *extack)
1532{
1533 int err;
1534
1535 /* RIFs can't be edited, so to update loopback, we need to destroy and
1536 * recreate it. That creates a window of opportunity where RALUE and
1537 * RATR registers end up referencing a RIF that's already gone. RATRs
1538 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001539 * of RALUE, demote the decap route back.
1540 */
1541 if (ipip_entry->decap_fib_entry)
1542 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1543
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001544 if (recreate_loopback) {
1545 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1546 keep_encap, extack);
1547 if (err)
1548 return err;
1549 } else if (update_nexthops) {
1550 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1551 &ipip_entry->ol_lb->common);
1552 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001553
Petr Machata65a61212017-11-03 10:03:37 +01001554 if (ipip_entry->ol_dev->flags & IFF_UP)
1555 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001556
1557 return 0;
1558}
1559
Petr Machata65a61212017-11-03 10:03:37 +01001560static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1561 struct net_device *ol_dev,
1562 struct netlink_ext_ack *extack)
1563{
1564 struct mlxsw_sp_ipip_entry *ipip_entry =
1565 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001566 enum mlxsw_sp_l3proto ul_proto;
1567 union mlxsw_sp_l3addr saddr;
1568 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001569
1570 if (!ipip_entry)
1571 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001572
 1573	/* For flat configuration cases, moving the overlay to a different VRF
 1574	 * might cause a local address conflict, and the conflicting tunnels
 1575	 * need to be demoted.
1576 */
1577 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1578 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1579 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1580 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1581 saddr, ul_tb_id,
1582 ipip_entry)) {
1583 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1584 return 0;
1585 }
1586
Petr Machata65a61212017-11-03 10:03:37 +01001587 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001588 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001589}
1590
Petr Machata61481f22017-11-03 10:03:41 +01001591static int
1592mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1593 struct mlxsw_sp_ipip_entry *ipip_entry,
1594 struct net_device *ul_dev,
1595 struct netlink_ext_ack *extack)
1596{
1597 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1598 true, true, false, extack);
1599}
1600
Petr Machata4cf04f32017-11-03 10:03:42 +01001601static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001602mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1603 struct mlxsw_sp_ipip_entry *ipip_entry,
1604 struct net_device *ul_dev)
1605{
1606 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1607 false, false, true, NULL);
1608}
1609
1610static int
1611mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1612 struct mlxsw_sp_ipip_entry *ipip_entry,
1613 struct net_device *ul_dev)
1614{
1615 /* A down underlay device causes encapsulated packets to not be
1616 * forwarded, but decap still works. So refresh next hops without
1617 * touching anything else.
1618 */
1619 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1620 false, false, true, NULL);
1621}
1622
1623static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001624mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1625 struct net_device *ol_dev,
1626 struct netlink_ext_ack *extack)
1627{
1628 const struct mlxsw_sp_ipip_ops *ipip_ops;
1629 struct mlxsw_sp_ipip_entry *ipip_entry;
1630 int err;
1631
1632 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1633 if (!ipip_entry)
1634 /* A change might make a tunnel eligible for offloading, but
1635 * that is currently not implemented. What falls to slow path
 1636		 * that is currently not implemented. What falls to the slow path
1637 */
1638 return 0;
1639
1640 /* A change might make a tunnel not eligible for offloading. */
1641 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1642 ipip_entry->ipipt)) {
1643 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1644 return 0;
1645 }
1646
1647 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1648 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1649 return err;
1650}
1651
Petr Machataaf641712017-11-03 10:03:40 +01001652void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1653 struct mlxsw_sp_ipip_entry *ipip_entry)
1654{
1655 struct net_device *ol_dev = ipip_entry->ol_dev;
1656
1657 if (ol_dev->flags & IFF_UP)
1658 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1659 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1660}
1661
1662/* The configuration where several tunnels have the same local address in the
1663 * same underlay table needs special treatment in the HW. That is currently not
1664 * implemented in the driver. This function finds and demotes the first tunnel
 1665 * with a given source address, except the one passed in the argument
1666 * `except'.
1667 */
1668bool
1669mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1670 enum mlxsw_sp_l3proto ul_proto,
1671 union mlxsw_sp_l3addr saddr,
1672 u32 ul_tb_id,
1673 const struct mlxsw_sp_ipip_entry *except)
1674{
1675 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1676
1677 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1678 ipip_list_node) {
1679 if (ipip_entry != except &&
1680 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1681 ul_tb_id, ipip_entry)) {
1682 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1683 return true;
1684 }
1685 }
1686
1687 return false;
1688}
1689
Petr Machata61481f22017-11-03 10:03:41 +01001690static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1691 struct net_device *ul_dev)
1692{
1693 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1694
1695 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1696 ipip_list_node) {
1697 struct net_device *ipip_ul_dev =
1698 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1699
1700 if (ipip_ul_dev == ul_dev)
1701 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1702 }
1703}
1704
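/* Handle netdevice events on a tunnel (overlay) device and dispatch to the
 * helpers above. NETDEV_CHANGEUPPER is only acted upon when the new upper is
 * an L3 master, i.e. the tunnel was moved to a different VRF.
 */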
Petr Machata7e75af62017-11-03 10:03:36 +01001705int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1706 struct net_device *ol_dev,
1707 unsigned long event,
1708 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001709{
Petr Machata7e75af62017-11-03 10:03:36 +01001710 struct netdev_notifier_changeupper_info *chup;
1711 struct netlink_ext_ack *extack;
1712
Petr Machata00635872017-10-16 16:26:37 +02001713 switch (event) {
1714 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001715 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001716 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001717 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001718 return 0;
1719 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001720 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1721 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001722 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001723 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001724 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001725 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001726 chup = container_of(info, typeof(*chup), info);
1727 extack = info->extack;
1728 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001729 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001730 ol_dev,
1731 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001732 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001733 case NETDEV_CHANGE:
1734 extack = info->extack;
1735 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1736 ol_dev, extack);
Petr Machata68c3cd92018-03-22 19:53:35 +02001737 case NETDEV_CHANGEMTU:
1738 return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001739 }
1740 return 0;
1741}
1742
Petr Machata61481f22017-11-03 10:03:41 +01001743static int
1744__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1745 struct mlxsw_sp_ipip_entry *ipip_entry,
1746 struct net_device *ul_dev,
1747 unsigned long event,
1748 struct netdev_notifier_info *info)
1749{
1750 struct netdev_notifier_changeupper_info *chup;
1751 struct netlink_ext_ack *extack;
1752
1753 switch (event) {
1754 case NETDEV_CHANGEUPPER:
1755 chup = container_of(info, typeof(*chup), info);
1756 extack = info->extack;
1757 if (netif_is_l3_master(chup->upper_dev))
1758 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1759 ipip_entry,
1760 ul_dev,
1761 extack);
1762 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001763
1764 case NETDEV_UP:
1765 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1766 ul_dev);
1767 case NETDEV_DOWN:
1768 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1769 ipip_entry,
1770 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001771 }
1772 return 0;
1773}
1774
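/* Apply a netdevice event on an underlay device to every tunnel using it. If
 * handling fails for any of them, all tunnels over this underlay device are
 * demoted to the slow path.
 */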
1775int
1776mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1777 struct net_device *ul_dev,
1778 unsigned long event,
1779 struct netdev_notifier_info *info)
1780{
1781 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1782 int err;
1783
1784 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1785 ul_dev,
1786 ipip_entry))) {
1787 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1788 ul_dev, event, info);
1789 if (err) {
1790 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1791 ul_dev);
1792 return err;
1793 }
1794 }
1795
1796 return 0;
1797}
1798
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001799struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001800 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001801};
1802
1803struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001804 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001805 struct rhash_head ht_node;
1806 struct mlxsw_sp_neigh_key key;
1807 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001808 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001809 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001810 struct list_head nexthop_list; /* list of nexthops using
1811 * this neigh entry
1812 */
Yotam Gigib2157142016-07-05 11:27:51 +02001813 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001814 unsigned int counter_index;
1815 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001816};
1817
1818static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1819 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1820 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1821 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1822};
1823
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001824struct mlxsw_sp_neigh_entry *
1825mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1826 struct mlxsw_sp_neigh_entry *neigh_entry)
1827{
1828 if (!neigh_entry) {
1829 if (list_empty(&rif->neigh_list))
1830 return NULL;
1831 else
1832 return list_first_entry(&rif->neigh_list,
1833 typeof(*neigh_entry),
1834 rif_list_node);
1835 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001836 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001837 return NULL;
1838 return list_next_entry(neigh_entry, rif_list_node);
1839}
1840
1841int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1842{
1843 return neigh_entry->key.n->tbl->family;
1844}
1845
1846unsigned char *
1847mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1848{
1849 return neigh_entry->ha;
1850}
1851
1852u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1853{
1854 struct neighbour *n;
1855
1856 n = neigh_entry->key.n;
1857 return ntohl(*((__be32 *) n->primary_key));
1858}
1859
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001860struct in6_addr *
1861mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1862{
1863 struct neighbour *n;
1864
1865 n = neigh_entry->key.n;
1866 return (struct in6_addr *) &n->primary_key;
1867}
1868
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001869int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1870 struct mlxsw_sp_neigh_entry *neigh_entry,
1871 u64 *p_counter)
1872{
1873 if (!neigh_entry->counter_valid)
1874 return -EINVAL;
1875
1876 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1877 p_counter, NULL);
1878}
1879
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001880static struct mlxsw_sp_neigh_entry *
1881mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1882 u16 rif)
1883{
1884 struct mlxsw_sp_neigh_entry *neigh_entry;
1885
1886 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1887 if (!neigh_entry)
1888 return NULL;
1889
1890 neigh_entry->key.n = n;
1891 neigh_entry->rif = rif;
1892 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1893
1894 return neigh_entry;
1895}
1896
1897static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1898{
1899 kfree(neigh_entry);
1900}
1901
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001902static int
1903mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1904 struct mlxsw_sp_neigh_entry *neigh_entry)
1905{
Ido Schimmel9011b672017-05-16 19:38:25 +02001906 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001907 &neigh_entry->ht_node,
1908 mlxsw_sp_neigh_ht_params);
1909}
1910
1911static void
1912mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1913 struct mlxsw_sp_neigh_entry *neigh_entry)
1914{
Ido Schimmel9011b672017-05-16 19:38:25 +02001915 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001916 &neigh_entry->ht_node,
1917 mlxsw_sp_neigh_ht_params);
1918}
1919
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001920static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001921mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1922 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001923{
1924 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001925 const char *table_name;
1926
1927 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1928 case AF_INET:
1929 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1930 break;
1931 case AF_INET6:
1932 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1933 break;
1934 default:
1935 WARN_ON(1);
1936 return false;
1937 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001938
1939 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001940 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001941}
1942
1943static void
1944mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1945 struct mlxsw_sp_neigh_entry *neigh_entry)
1946{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001947 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001948 return;
1949
1950 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1951 return;
1952
1953 neigh_entry->counter_valid = true;
1954}
1955
1956static void
1957mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1958 struct mlxsw_sp_neigh_entry *neigh_entry)
1959{
1960 if (!neigh_entry->counter_valid)
1961 return;
1962 mlxsw_sp_flow_counter_free(mlxsw_sp,
1963 neigh_entry->counter_index);
1964 neigh_entry->counter_valid = false;
1965}
1966
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001967static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001968mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001969{
1970 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001971 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001972 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001973
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001974 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1975 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001976 return ERR_PTR(-EINVAL);
1977
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001978 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001979 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001980 return ERR_PTR(-ENOMEM);
1981
1982 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1983 if (err)
1984 goto err_neigh_entry_insert;
1985
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001986 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001987 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001988
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001989 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001990
1991err_neigh_entry_insert:
1992 mlxsw_sp_neigh_entry_free(neigh_entry);
1993 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001994}
1995
1996static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001997mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1998 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001999{
Ido Schimmel9665b742017-02-08 11:16:42 +01002000 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002001 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002002 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2003 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002004}
2005
2006static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01002007mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002008{
Jiri Pirko33b13412016-11-10 12:31:04 +01002009 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002010
Jiri Pirko33b13412016-11-10 12:31:04 +01002011 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02002012 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002013 &key, mlxsw_sp_neigh_ht_params);
2014}
2015
Yotam Gigic723c7352016-07-05 11:27:43 +02002016static void
2017mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2018{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02002019 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002020
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002021#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02002022 interval = min_t(unsigned long,
2023 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2024 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002025#else
2026 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2027#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02002028 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02002029}
2030
2031static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2032 char *rauhtd_pl,
2033 int ent_index)
2034{
2035 struct net_device *dev;
2036 struct neighbour *n;
2037 __be32 dipn;
2038 u32 dip;
2039 u16 rif;
2040
2041 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2042
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002043 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02002044 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2045 return;
2046 }
2047
2048 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002049 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02002050 n = neigh_lookup(&arp_tbl, &dipn, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002051 if (!n)
Yotam Gigic723c7352016-07-05 11:27:43 +02002052 return;
Yotam Gigic723c7352016-07-05 11:27:43 +02002053
2054 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2055 neigh_event_send(n, NULL);
2056 neigh_release(n);
2057}
2058
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02002059#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002060static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2061 char *rauhtd_pl,
2062 int rec_index)
2063{
2064 struct net_device *dev;
2065 struct neighbour *n;
2066 struct in6_addr dip;
2067 u16 rif;
2068
2069 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2070 (char *) &dip);
2071
2072 if (!mlxsw_sp->router->rifs[rif]) {
2073 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2074 return;
2075 }
2076
2077 dev = mlxsw_sp->router->rifs[rif]->dev;
2078 n = neigh_lookup(&nd_tbl, &dip, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002079 if (!n)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002080 return;
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002081
2082 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2083 neigh_event_send(n, NULL);
2084 neigh_release(n);
2085}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002086#else
2087static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2088 char *rauhtd_pl,
2089 int rec_index)
2090{
2091}
2092#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002093
Yotam Gigic723c7352016-07-05 11:27:43 +02002094static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2095 char *rauhtd_pl,
2096 int rec_index)
2097{
2098 u8 num_entries;
2099 int i;
2100
2101 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2102 rec_index);
2103 /* Hardware starts counting at 0, so add 1. */
2104 num_entries++;
2105
2106 /* Each record consists of several neighbour entries. */
2107 for (i = 0; i < num_entries; i++) {
2108 int ent_index;
2109
2110 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2111 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2112 ent_index);
2113 }
2114
2115}
2116
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002117static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2118 char *rauhtd_pl,
2119 int rec_index)
2120{
2121 /* One record contains one entry. */
2122 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2123 rec_index);
2124}
2125
Yotam Gigic723c7352016-07-05 11:27:43 +02002126static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2127 char *rauhtd_pl, int rec_index)
2128{
2129 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2130 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2131 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2132 rec_index);
2133 break;
2134 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002135 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2136 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002137 break;
2138 }
2139}
2140
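/* Check whether the RAUHTD response used up all available records, in which
 * case another dump iteration is needed to retrieve the remaining entries.
 */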
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002141static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2142{
2143 u8 num_rec, last_rec_index, num_entries;
2144
2145 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2146 last_rec_index = num_rec - 1;
2147
2148 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2149 return false;
2150 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2151 MLXSW_REG_RAUHTD_TYPE_IPV6)
2152 return true;
2153
2154 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2155 last_rec_index);
2156 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2157 return true;
2158 return false;
2159}
2160
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002161static int
2162__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2163 char *rauhtd_pl,
2164 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002165{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002166 int i, num_rec;
2167 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002168
2169 /* Make sure the neighbour's netdev isn't removed in the
2170 * process.
2171 */
2172 rtnl_lock();
2173 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002174 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002175 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2176 rauhtd_pl);
2177 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002178 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002179 break;
2180 }
2181 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2182 for (i = 0; i < num_rec; i++)
2183 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2184 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002185 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002186 rtnl_unlock();
2187
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002188 return err;
2189}
2190
2191static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2192{
2193 enum mlxsw_reg_rauhtd_type type;
2194 char *rauhtd_pl;
2195 int err;
2196
2197 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2198 if (!rauhtd_pl)
2199 return -ENOMEM;
2200
2201 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2202 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2203 if (err)
2204 goto out;
2205
2206 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2207 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2208out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002209 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002210 return err;
2211}
2212
2213static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2214{
2215 struct mlxsw_sp_neigh_entry *neigh_entry;
2216
 2217	/* Take the RTNL mutex here to prevent the lists from changing */
2218 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002219 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002220 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002221		/* If this neigh has nexthops, make the kernel think this neigh
2222 * is active regardless of the traffic.
2223 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002224 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002225 rtnl_unlock();
2226}
2227
2228static void
2229mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2230{
Ido Schimmel9011b672017-05-16 19:38:25 +02002231 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002232
Ido Schimmel9011b672017-05-16 19:38:25 +02002233 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002234 msecs_to_jiffies(interval));
2235}
2236
2237static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2238{
Ido Schimmel9011b672017-05-16 19:38:25 +02002239 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002240 int err;
2241
Ido Schimmel9011b672017-05-16 19:38:25 +02002242 router = container_of(work, struct mlxsw_sp_router,
2243 neighs_update.dw.work);
2244 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002245 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002246 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002247
Ido Schimmel9011b672017-05-16 19:38:25 +02002248 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002249
Ido Schimmel9011b672017-05-16 19:38:25 +02002250 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002251}
2252
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002253static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2254{
2255 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002256 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002257
Ido Schimmel9011b672017-05-16 19:38:25 +02002258 router = container_of(work, struct mlxsw_sp_router,
2259 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002260	/* Iterate over the nexthop neighbours, find the unresolved ones and
 2261	 * send ARP for them. This solves the chicken-and-egg problem where
 2262	 * a nexthop would not get offloaded until its neighbour is resolved,
 2263	 * but the neighbour would never get resolved as long as traffic
 2264	 * flows in HW via a different nexthop.
 2265	 *
 2266	 * Take the RTNL mutex here to prevent the lists from changing.
2267 */
2268 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002269 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002270 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002271 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002272 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002273 rtnl_unlock();
2274
Ido Schimmel9011b672017-05-16 19:38:25 +02002275 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002276 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2277}
2278
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002279static void
2280mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2281 struct mlxsw_sp_neigh_entry *neigh_entry,
2282 bool removing);
2283
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002284static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002285{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002286 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2287 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2288}
2289
2290static void
2291mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2292 struct mlxsw_sp_neigh_entry *neigh_entry,
2293 enum mlxsw_reg_rauht_op op)
2294{
Jiri Pirko33b13412016-11-10 12:31:04 +01002295 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002296 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002297 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002298
2299 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2300 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002301 if (neigh_entry->counter_valid)
2302 mlxsw_reg_rauht_pack_counter(rauht_pl,
2303 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002304 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2305}
2306
2307static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002308mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2309 struct mlxsw_sp_neigh_entry *neigh_entry,
2310 enum mlxsw_reg_rauht_op op)
2311{
2312 struct neighbour *n = neigh_entry->key.n;
2313 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2314 const char *dip = n->primary_key;
2315
2316 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2317 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002318 if (neigh_entry->counter_valid)
2319 mlxsw_reg_rauht_pack_counter(rauht_pl,
2320 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002321 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2322}
2323
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002324bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002325{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002326 struct neighbour *n = neigh_entry->key.n;
2327
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002328 /* Packets with a link-local destination address are trapped
2329 * after LPM lookup and never reach the neighbour table, so
2330 * there is no need to program such neighbours to the device.
2331 */
2332 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2333 IPV6_ADDR_LINKLOCAL)
2334 return true;
2335 return false;
2336}
2337
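/* Reflect the neighbour's state in the device by adding or deleting the
 * corresponding RAUHT entry. IPv6 link-local neighbours are never programmed,
 * since such traffic is trapped before reaching the neighbour table.
 */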
2338static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002339mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2340 struct mlxsw_sp_neigh_entry *neigh_entry,
2341 bool adding)
2342{
2343 if (!adding && !neigh_entry->connected)
2344 return;
2345 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002346 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002347 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2348 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002349 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002350 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002351 return;
2352 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2353 mlxsw_sp_rauht_op(adding));
2354 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002355 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002356 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002357}
2358
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002359void
2360mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2361 struct mlxsw_sp_neigh_entry *neigh_entry,
2362 bool adding)
2363{
2364 if (adding)
2365 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2366 else
2367 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2368 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2369}
2370
Ido Schimmelceb88812017-11-02 17:14:07 +01002371struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002372 struct work_struct work;
2373 struct mlxsw_sp *mlxsw_sp;
2374 struct neighbour *n;
2375};
2376
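/* Deferred handler for neighbour update events: sample the neighbour state
 * under its lock, then synchronize the driver's neigh entry and the nexthops
 * using it under RTNL.
 */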
2377static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2378{
Ido Schimmelceb88812017-11-02 17:14:07 +01002379 struct mlxsw_sp_netevent_work *net_work =
2380 container_of(work, struct mlxsw_sp_netevent_work, work);
2381 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002382 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002383 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002384 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002385 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002386 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002387
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002388 /* If these parameters are changed after we release the lock,
2389 * then we are guaranteed to receive another event letting us
2390 * know about it.
2391 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002392 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002393 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002394 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002395 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002396 read_unlock_bh(&n->lock);
2397
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002398 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01002399 mlxsw_sp_span_respin(mlxsw_sp);
2400
Ido Schimmel93a87e52016-12-23 09:32:49 +01002401 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002402 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2403 if (!entry_connected && !neigh_entry)
2404 goto out;
2405 if (!neigh_entry) {
2406 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2407 if (IS_ERR(neigh_entry))
2408 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002409 }
2410
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002411 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2412 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2413 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2414
2415 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2416 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2417
2418out:
2419 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002420 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002421 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002422}
2423
Ido Schimmel28678f02017-11-02 17:14:10 +01002424static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2425
2426static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2427{
2428 struct mlxsw_sp_netevent_work *net_work =
2429 container_of(work, struct mlxsw_sp_netevent_work, work);
2430 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2431
2432 mlxsw_sp_mp_hash_init(mlxsw_sp);
2433 kfree(net_work);
2434}
2435
2436static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002437 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002438{
Ido Schimmelceb88812017-11-02 17:14:07 +01002439 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002440 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002441 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002442 struct mlxsw_sp *mlxsw_sp;
2443 unsigned long interval;
2444 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002445 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002446 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002447
2448 switch (event) {
2449 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2450 p = ptr;
2451
2452 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002453 if (!p->dev || (p->tbl->family != AF_INET &&
2454 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002455 return NOTIFY_DONE;
2456
2457 /* We are in atomic context and can't take RTNL mutex,
2458 * so use RCU variant to walk the device chain.
2459 */
2460 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2461 if (!mlxsw_sp_port)
2462 return NOTIFY_DONE;
2463
2464 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2465 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002466 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002467
2468 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2469 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002470 case NETEVENT_NEIGH_UPDATE:
2471 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002472
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002473 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002474 return NOTIFY_DONE;
2475
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002476 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002477 if (!mlxsw_sp_port)
2478 return NOTIFY_DONE;
2479
Ido Schimmelceb88812017-11-02 17:14:07 +01002480 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2481 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002482 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002483 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002484 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002485
Ido Schimmelceb88812017-11-02 17:14:07 +01002486 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2487 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2488 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002489
 2490		/* Take a reference to ensure the neighbour won't be
 2491		 * destroyed until we drop the reference in the work
 2492		 * handler.
2493 */
2494 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002495 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002496 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002497 break;
David Ahern3192dac2018-03-02 08:32:16 -08002498 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
David Ahern5e18b9c552018-03-02 08:32:19 -08002499 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
Ido Schimmel28678f02017-11-02 17:14:10 +01002500 net = ptr;
2501
2502 if (!net_eq(net, &init_net))
2503 return NOTIFY_DONE;
2504
2505 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2506 if (!net_work)
2507 return NOTIFY_BAD;
2508
2509 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2510 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2511 net_work->mlxsw_sp = router->mlxsw_sp;
2512 mlxsw_core_schedule_work(&net_work->work);
2513 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002514 }
2515
2516 return NOTIFY_DONE;
2517}
2518
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002519static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2520{
Yotam Gigic723c7352016-07-05 11:27:43 +02002521 int err;
2522
Ido Schimmel9011b672017-05-16 19:38:25 +02002523 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002524 &mlxsw_sp_neigh_ht_params);
2525 if (err)
2526 return err;
2527
2528 /* Initialize the polling interval according to the default
2529 * table.
2530 */
2531 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2532
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002533	/* Create the delayed works for neighbour activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002534 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002535 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002536 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002537 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002538 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2539 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002540 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002541}
2542
2543static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2544{
Ido Schimmel9011b672017-05-16 19:38:25 +02002545 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2546 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2547 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002548}
2549
Ido Schimmel9665b742017-02-08 11:16:42 +01002550static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002551 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002552{
2553 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2554
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002555 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002556 rif_list_node) {
2557 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002558 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002559 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002560}
2561
Petr Machata35225e42017-09-02 23:49:22 +02002562enum mlxsw_sp_nexthop_type {
2563 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002564 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002565};
2566
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002567struct mlxsw_sp_nexthop_key {
2568 struct fib_nh *fib_nh;
2569};
2570
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002571struct mlxsw_sp_nexthop {
2572 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002573 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002574 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002575 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2576 * this belongs to
2577 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002578 struct rhash_head ht_node;
2579 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002580 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002581 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002582 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002583 int norm_nh_weight;
2584 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002585 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002586 u8 should_offload:1, /* set indicates this neigh is connected and
2587 * should be put to KVD linear area of this group.
2588 */
2589 offloaded:1, /* set in case the neigh is actually put into
2590 * KVD linear area of this group.
2591 */
2592 update:1; /* set indicates that MAC of this neigh should be
2593 * updated in HW
2594 */
Petr Machata35225e42017-09-02 23:49:22 +02002595 enum mlxsw_sp_nexthop_type type;
2596 union {
2597 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002598 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002599 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002600 unsigned int counter_index;
2601 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002602};
2603
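/* A nexthop group aggregates the nexthops shared by the FIB entries on
 * 'fib_list'. When offloaded, the group members occupy a block of 'ecmp_size'
 * adjacency entries starting at 'adj_index'.
 */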
2604struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002605 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002606 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002607 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002608 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002609 u8 adj_index_valid:1,
2610 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002611 u32 adj_index;
2612 u16 ecmp_size;
2613 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002614 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002615 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002616#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002617};
2618
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002619void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2620 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002621{
2622 struct devlink *devlink;
2623
2624 devlink = priv_to_devlink(mlxsw_sp->core);
2625 if (!devlink_dpipe_table_counter_enabled(devlink,
2626 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2627 return;
2628
2629 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2630 return;
2631
2632 nh->counter_valid = true;
2633}
2634
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002635void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2636 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002637{
2638 if (!nh->counter_valid)
2639 return;
2640 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2641 nh->counter_valid = false;
2642}
2643
2644int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2645 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2646{
2647 if (!nh->counter_valid)
2648 return -EINVAL;
2649
2650 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2651 p_counter, NULL);
2652}
2653
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002654struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2655 struct mlxsw_sp_nexthop *nh)
2656{
2657 if (!nh) {
2658 if (list_empty(&router->nexthop_list))
2659 return NULL;
2660 else
2661 return list_first_entry(&router->nexthop_list,
2662 typeof(*nh), router_list_node);
2663 }
2664 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2665 return NULL;
2666 return list_next_entry(nh, router_list_node);
2667}
2668
2669bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2670{
2671 return nh->offloaded;
2672}
2673
2674unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2675{
2676 if (!nh->offloaded)
2677 return NULL;
2678 return nh->neigh_entry->ha;
2679}
2680
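/* Report where the nexthop is programmed in the adjacency table: the group's
 * base index and size, and the offset of this nexthop within the group.
 */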
2681int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002682 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002683{
2684 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2685 u32 adj_hash_index = 0;
2686 int i;
2687
2688 if (!nh->offloaded || !nh_grp->adj_index_valid)
2689 return -EINVAL;
2690
2691 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002692 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002693
2694 for (i = 0; i < nh_grp->count; i++) {
2695 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2696
2697 if (nh_iter == nh)
2698 break;
2699 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002700 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002701 }
2702
2703 *p_adj_hash_index = adj_hash_index;
2704 return 0;
2705}
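
/* The triplet reported by mlxsw_sp_nexthop_indexes() locates a nexthop inside
 * its group's adjacency block: the group occupies adj_size consecutive entries
 * starting at adj_index, and each offloaded nexthop owns num_adj_entries of
 * them. Worked example (hypothetical numbers): a group at adj_index 1000 with
 * adj_size 4 whose three offloaded nexthops own 2, 1 and 1 entries yields
 * (1000, 4, 0), (1000, 4, 2) and (1000, 4, 3) respectively.
 */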
2706
2707struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2708{
2709 return nh->rif;
2710}
2711
2712bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2713{
2714 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2715 int i;
2716
2717 for (i = 0; i < nh_grp->count; i++) {
2718 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2719
2720 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2721 return true;
2722 }
2723 return false;
2724}
2725
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002726static struct fib_info *
2727mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2728{
2729 return nh_grp->priv;
2730}
2731
2732struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002733 enum mlxsw_sp_l3proto proto;
2734 union {
2735 struct fib_info *fi;
2736 struct mlxsw_sp_fib6_entry *fib6_entry;
2737 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002738};
2739
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002740static bool
2741mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002742 const struct in6_addr *gw, int ifindex,
2743 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002744{
2745 int i;
2746
2747 for (i = 0; i < nh_grp->count; i++) {
2748 const struct mlxsw_sp_nexthop *nh;
2749
2750 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002751 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002752 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2753 return true;
2754 }
2755
2756 return false;
2757}
2758
2759static bool
2760mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2761 const struct mlxsw_sp_fib6_entry *fib6_entry)
2762{
2763 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2764
2765 if (nh_grp->count != fib6_entry->nrt6)
2766 return false;
2767
2768 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2769 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002770 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002771
David Ahern5e670d82018-04-17 17:33:14 -07002772 ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
2773 weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
2774 gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002775 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2776 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002777 return false;
2778 }
2779
2780 return true;
2781}
2782
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002783static int
2784mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2785{
2786 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2787 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2788
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002789 switch (cmp_arg->proto) {
2790 case MLXSW_SP_L3_PROTO_IPV4:
2791 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2792 case MLXSW_SP_L3_PROTO_IPV6:
2793 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2794 cmp_arg->fib6_entry);
2795 default:
2796 WARN_ON(1);
2797 return 1;
2798 }
2799}
2800
2801static int
2802mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2803{
2804 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002805}
2806
2807static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2808{
2809 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002810 const struct mlxsw_sp_nexthop *nh;
2811 struct fib_info *fi;
2812 unsigned int val;
2813 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002814
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002815 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2816 case AF_INET:
2817 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2818 return jhash(&fi, sizeof(fi), seed);
2819 case AF_INET6:
2820 val = nh_grp->count;
2821 for (i = 0; i < nh_grp->count; i++) {
2822 nh = &nh_grp->nexthops[i];
2823 val ^= nh->ifindex;
2824 }
2825 return jhash(&val, sizeof(val), seed);
2826 default:
2827 WARN_ON(1);
2828 return 0;
2829 }
2830}
2831
2832static u32
2833mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2834{
2835 unsigned int val = fib6_entry->nrt6;
2836 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2837 struct net_device *dev;
2838
2839 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern5e670d82018-04-17 17:33:14 -07002840 dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002841 val ^= dev->ifindex;
2842 }
2843
2844 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002845}
2846
2847static u32
2848mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2849{
2850 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2851
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002852 switch (cmp_arg->proto) {
2853 case MLXSW_SP_L3_PROTO_IPV4:
2854 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2855 case MLXSW_SP_L3_PROTO_IPV6:
2856 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2857 default:
2858 WARN_ON(1);
2859 return 0;
2860 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002861}
2862
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002863static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002864 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002865 .hashfn = mlxsw_sp_nexthop_group_hash,
2866 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2867 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002868};
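
/* The nexthop group hashtable above is keyed per address family: IPv4 groups
 * hash and compare the fib_info pointer itself (one group per fib_info),
 * whereas IPv6 groups hash the member count XORed with each member's ifindex
 * and compare member by member on (ifindex, gateway, weight), since several
 * fib6_info entries may share a single group. Note the rhashtable convention
 * that obj_cmpfn returns 0 on a match, which is why the result of
 * mlxsw_sp_nexthop6_group_cmp() is negated in mlxsw_sp_nexthop_group_cmp().
 */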
2869
2870static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2871 struct mlxsw_sp_nexthop_group *nh_grp)
2872{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002873 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2874 !nh_grp->gateway)
2875 return 0;
2876
Ido Schimmel9011b672017-05-16 19:38:25 +02002877 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002878 &nh_grp->ht_node,
2879 mlxsw_sp_nexthop_group_ht_params);
2880}
2881
2882static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2883 struct mlxsw_sp_nexthop_group *nh_grp)
2884{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002885 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2886 !nh_grp->gateway)
2887 return;
2888
Ido Schimmel9011b672017-05-16 19:38:25 +02002889 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002890 &nh_grp->ht_node,
2891 mlxsw_sp_nexthop_group_ht_params);
2892}
2893
2894static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002895mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2896 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002897{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002898 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2899
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002900 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002901 cmp_arg.fi = fi;
2902 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2903 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002904 mlxsw_sp_nexthop_group_ht_params);
2905}
2906
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002907static struct mlxsw_sp_nexthop_group *
2908mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2909 struct mlxsw_sp_fib6_entry *fib6_entry)
2910{
2911 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2912
2913 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2914 cmp_arg.fib6_entry = fib6_entry;
2915 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2916 &cmp_arg,
2917 mlxsw_sp_nexthop_group_ht_params);
2918}
2919
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002920static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2921 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2922 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2923 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2924};
2925
2926static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2927 struct mlxsw_sp_nexthop *nh)
2928{
Ido Schimmel9011b672017-05-16 19:38:25 +02002929 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002930 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2931}
2932
2933static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2934 struct mlxsw_sp_nexthop *nh)
2935{
Ido Schimmel9011b672017-05-16 19:38:25 +02002936 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002937 mlxsw_sp_nexthop_ht_params);
2938}
2939
Ido Schimmelad178c82017-02-08 11:16:40 +01002940static struct mlxsw_sp_nexthop *
2941mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2942 struct mlxsw_sp_nexthop_key key)
2943{
Ido Schimmel9011b672017-05-16 19:38:25 +02002944 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002945 mlxsw_sp_nexthop_ht_params);
2946}
2947
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002948static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002949 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002950 u32 adj_index, u16 ecmp_size,
2951 u32 new_adj_index,
2952 u16 new_ecmp_size)
2953{
2954 char raleu_pl[MLXSW_REG_RALEU_LEN];
2955
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002956 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002957 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2958 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002959 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002960 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2961}
2962
2963static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2964 struct mlxsw_sp_nexthop_group *nh_grp,
2965 u32 old_adj_index, u16 old_ecmp_size)
2966{
2967 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002968 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002969 int err;
2970
2971 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002972 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002973 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002974 fib = fib_entry->fib_node->fib;
2975 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002976 old_adj_index,
2977 old_ecmp_size,
2978 nh_grp->adj_index,
2979 nh_grp->ecmp_size);
2980 if (err)
2981 return err;
2982 }
2983 return 0;
2984}
2985
Ido Schimmeleb789982017-10-22 23:11:48 +02002986static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2987 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002988{
2989 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2990 char ratr_pl[MLXSW_REG_RATR_LEN];
2991
2992 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002993 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2994 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002995 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002996 if (nh->counter_valid)
2997 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2998 else
2999 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3000
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003001 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3002}
3003
Ido Schimmeleb789982017-10-22 23:11:48 +02003004int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3005 struct mlxsw_sp_nexthop *nh)
3006{
3007 int i;
3008
3009 for (i = 0; i < nh->num_adj_entries; i++) {
3010 int err;
3011
3012 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3013 if (err)
3014 return err;
3015 }
3016
3017 return 0;
3018}
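
/* With weighted ECMP a single nexthop can back several consecutive adjacency
 * entries (nh->num_adj_entries, computed by the rebalance code further down),
 * and every one of them must carry the same neighbour MAC and RIF, hence the
 * per-entry loop above. Hypothetical layout for a group at adj_index 100 with
 * two nexthops weighted 2:1 over three entries:
 *
 *	entry 100: nexthop A, entry 101: nexthop A, entry 102: nexthop B
 */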
3019
3020static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3021 u32 adj_index,
3022 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02003023{
3024 const struct mlxsw_sp_ipip_ops *ipip_ops;
3025
3026 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3027 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3028}
3029
Ido Schimmeleb789982017-10-22 23:11:48 +02003030static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3031 u32 adj_index,
3032 struct mlxsw_sp_nexthop *nh)
3033{
3034 int i;
3035
3036 for (i = 0; i < nh->num_adj_entries; i++) {
3037 int err;
3038
3039 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3040 nh);
3041 if (err)
3042 return err;
3043 }
3044
3045 return 0;
3046}
3047
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003048static int
Petr Machata35225e42017-09-02 23:49:22 +02003049mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3050 struct mlxsw_sp_nexthop_group *nh_grp,
3051 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003052{
3053 u32 adj_index = nh_grp->adj_index; /* base */
3054 struct mlxsw_sp_nexthop *nh;
3055 int i;
3056 int err;
3057
3058 for (i = 0; i < nh_grp->count; i++) {
3059 nh = &nh_grp->nexthops[i];
3060
3061 if (!nh->should_offload) {
3062 nh->offloaded = 0;
3063 continue;
3064 }
3065
Ido Schimmela59b7e02017-01-23 11:11:42 +01003066 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02003067 switch (nh->type) {
3068 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003069 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003070 (mlxsw_sp, adj_index, nh);
3071 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003072 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3073 err = mlxsw_sp_nexthop_ipip_update
3074 (mlxsw_sp, adj_index, nh);
3075 break;
Petr Machata35225e42017-09-02 23:49:22 +02003076 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003077 if (err)
3078 return err;
3079 nh->update = 0;
3080 nh->offloaded = 1;
3081 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003082 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003083 }
3084 return 0;
3085}
3086
Ido Schimmel1819ae32017-07-21 18:04:28 +02003087static bool
3088mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3089 const struct mlxsw_sp_fib_entry *fib_entry);
3090
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003091static int
3092mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3093 struct mlxsw_sp_nexthop_group *nh_grp)
3094{
3095 struct mlxsw_sp_fib_entry *fib_entry;
3096 int err;
3097
3098 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02003099 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3100 fib_entry))
3101 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003102 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3103 if (err)
3104 return err;
3105 }
3106 return 0;
3107}
3108
3109static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003110mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3111 enum mlxsw_reg_ralue_op op, int err);
3112
3113static void
3114mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3115{
3116 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3117 struct mlxsw_sp_fib_entry *fib_entry;
3118
3119 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3120 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3121 fib_entry))
3122 continue;
3123 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3124 }
3125}
3126
Ido Schimmel425a08c2017-10-22 23:11:47 +02003127static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3128{
3129 /* Valid sizes for an adjacency group are:
3130 * 1-64, 512, 1024, 2048 and 4096.
3131 */
3132 if (*p_adj_grp_size <= 64)
3133 return;
3134 else if (*p_adj_grp_size <= 512)
3135 *p_adj_grp_size = 512;
3136 else if (*p_adj_grp_size <= 1024)
3137 *p_adj_grp_size = 1024;
3138 else if (*p_adj_grp_size <= 2048)
3139 *p_adj_grp_size = 2048;
3140 else
3141 *p_adj_grp_size = 4096;
3142}
3143
3144static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3145 unsigned int alloc_size)
3146{
3147 if (alloc_size >= 4096)
3148 *p_adj_grp_size = 4096;
3149 else if (alloc_size >= 2048)
3150 *p_adj_grp_size = 2048;
3151 else if (alloc_size >= 1024)
3152 *p_adj_grp_size = 1024;
3153 else if (alloc_size >= 512)
3154 *p_adj_grp_size = 512;
3155}
3156
3157static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3158 u16 *p_adj_grp_size)
3159{
3160 unsigned int alloc_size;
3161 int err;
3162
3163 /* Round up the requested group size to the next size supported
3164 * by the device and make sure the request can be satisfied.
3165 */
3166 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
Jiri Pirko4b6b1862018-07-08 23:51:17 +03003167 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3168 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3169 *p_adj_grp_size, &alloc_size);
Ido Schimmel425a08c2017-10-22 23:11:47 +02003170 if (err)
3171 return err;
3172 /* It is possible the allocation results in more allocated
3173 * entries than requested. Try to use as much of them as
3174 * possible.
3175 */
3176 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3177
3178 return 0;
3179}
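
/* Worked example of the sizing logic above (numbers are hypothetical): a group
 * needing 6 adjacency entries keeps size 6, since 1-64 are all valid, while a
 * group needing 70 is rounded up to 512. The KVDL allocator is then queried;
 * if a 512-entry request would actually yield a 2048-entry allocation, the
 * group size is bumped to the largest supported size the allocation can hold,
 * here 2048, so the extra entries are used rather than wasted.
 */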
3180
Ido Schimmel77d964e2017-08-02 09:56:05 +02003181static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003182mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3183{
3184 int i, g = 0, sum_norm_weight = 0;
3185 struct mlxsw_sp_nexthop *nh;
3186
3187 for (i = 0; i < nh_grp->count; i++) {
3188 nh = &nh_grp->nexthops[i];
3189
3190 if (!nh->should_offload)
3191 continue;
3192 if (g > 0)
3193 g = gcd(nh->nh_weight, g);
3194 else
3195 g = nh->nh_weight;
3196 }
3197
3198 for (i = 0; i < nh_grp->count; i++) {
3199 nh = &nh_grp->nexthops[i];
3200
3201 if (!nh->should_offload)
3202 continue;
3203 nh->norm_nh_weight = nh->nh_weight / g;
3204 sum_norm_weight += nh->norm_nh_weight;
3205 }
3206
3207 nh_grp->sum_norm_weight = sum_norm_weight;
3208}
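
/* Weight normalization above divides every offloaded nexthop weight by the
 * group-wide GCD, e.g. (hypothetical) weights 10:20:30 become 1:2:3 with
 * sum_norm_weight = 6, while 3:5 stays 3:5 (GCD 1) with sum_norm_weight = 8.
 * A sum_norm_weight of 0 therefore means that no nexthop in the group is
 * currently offloadable.
 */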
3209
3210static void
3211mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3212{
3213 int total = nh_grp->sum_norm_weight;
3214 u16 ecmp_size = nh_grp->ecmp_size;
3215 int i, weight = 0, lower_bound = 0;
3216
3217 for (i = 0; i < nh_grp->count; i++) {
3218 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3219 int upper_bound;
3220
3221 if (!nh->should_offload)
3222 continue;
3223 weight += nh->norm_nh_weight;
3224 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3225 nh->num_adj_entries = upper_bound - lower_bound;
3226 lower_bound = upper_bound;
3227 }
3228}
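
/* The rebalance above carves the group's ecmp_size entries into consecutive
 * runs proportional to the normalized weights, applying DIV_ROUND_CLOSEST to
 * the running weight so rounding errors do not accumulate. Hypothetical
 * example: weights 2 and 1 (sum_norm_weight = 3) over ecmp_size = 4 give
 * upper bounds DIV_ROUND_CLOSEST(4 * 2, 3) = 3 and DIV_ROUND_CLOSEST(4 * 3, 3)
 * = 4, so the first nexthop owns entries 0-2 and the second owns entry 3.
 */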
3229
3230static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003231mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3232 struct mlxsw_sp_nexthop_group *nh_grp)
3233{
Ido Schimmeleb789982017-10-22 23:11:48 +02003234 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003235 struct mlxsw_sp_nexthop *nh;
3236 bool offload_change = false;
3237 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003238 bool old_adj_index_valid;
3239 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003240 int i;
3241 int err;
3242
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003243 if (!nh_grp->gateway) {
3244 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3245 return;
3246 }
3247
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003248 for (i = 0; i < nh_grp->count; i++) {
3249 nh = &nh_grp->nexthops[i];
3250
Petr Machata56b8a9e2017-07-31 09:27:29 +02003251 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003252 offload_change = true;
3253 if (nh->should_offload)
3254 nh->update = 1;
3255 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003256 }
3257 if (!offload_change) {
3258 /* Nothing was added or removed, so no need to reallocate. Just
3259 * update MAC on existing adjacency indexes.
3260 */
Petr Machata35225e42017-09-02 23:49:22 +02003261 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003262 if (err) {
3263 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3264 goto set_trap;
3265 }
3266 return;
3267 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003268 mlxsw_sp_nexthop_group_normalize(nh_grp);
3269 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003270		/* No neigh of this group is connected, so we just set
3271		 * the trap and let everything flow through the kernel.
3272 */
3273 goto set_trap;
3274
Ido Schimmeleb789982017-10-22 23:11:48 +02003275 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003276 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3277 if (err)
3278 /* No valid allocation size available. */
3279 goto set_trap;
3280
Jiri Pirko4b6b1862018-07-08 23:51:17 +03003281 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3282 ecmp_size, &adj_index);
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003283 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003284		/* We ran out of KVD linear space, so just set the
3285		 * trap and let everything flow through the kernel.
3286 */
3287 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3288 goto set_trap;
3289 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003290 old_adj_index_valid = nh_grp->adj_index_valid;
3291 old_adj_index = nh_grp->adj_index;
3292 old_ecmp_size = nh_grp->ecmp_size;
3293 nh_grp->adj_index_valid = 1;
3294 nh_grp->adj_index = adj_index;
3295 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003296 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003297 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003298 if (err) {
3299 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3300 goto set_trap;
3301 }
3302
3303 if (!old_adj_index_valid) {
3304 /* The trap was set for fib entries, so we have to call
3305		 * fib entry update to unset it and use the adjacency index.
3306 */
3307 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3308 if (err) {
3309 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3310 goto set_trap;
3311 }
3312 return;
3313 }
3314
3315 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3316 old_adj_index, old_ecmp_size);
Jiri Pirko4b6b1862018-07-08 23:51:17 +03003317 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
Jiri Pirko0304c002018-07-08 23:51:18 +03003318 old_ecmp_size, old_adj_index);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003319 if (err) {
3320 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3321 goto set_trap;
3322 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003323
3324 /* Offload state within the group changed, so update the flags. */
3325 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3326
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003327 return;
3328
3329set_trap:
3330 old_adj_index_valid = nh_grp->adj_index_valid;
3331 nh_grp->adj_index_valid = 0;
3332 for (i = 0; i < nh_grp->count; i++) {
3333 nh = &nh_grp->nexthops[i];
3334 nh->offloaded = 0;
3335 }
3336 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3337 if (err)
3338 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3339 if (old_adj_index_valid)
Jiri Pirko4b6b1862018-07-08 23:51:17 +03003340 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
Jiri Pirko0304c002018-07-08 23:51:18 +03003341 nh_grp->ecmp_size, nh_grp->adj_index);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003342}
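
/* Refresh flow in short (descriptive summary of the function above): when
 * group membership changes, a new KVDL adjacency block of the recomputed size
 * is allocated and populated, the routes are re-pointed at it (via RALUE
 * updates if they were previously trapping, or via a RALEU mass-update per VR
 * if they already used the old block), and only then is the old block freed.
 * Any failure falls through to set_trap, which clears adj_index_valid and
 * lets the kernel forward the traffic instead.
 */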
3343
3344static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3345 bool removing)
3346{
Petr Machata213666a2017-07-31 09:27:30 +02003347 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003348 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003349 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003350 nh->should_offload = 0;
3351 nh->update = 1;
3352}
3353
3354static void
3355mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3356 struct mlxsw_sp_neigh_entry *neigh_entry,
3357 bool removing)
3358{
3359 struct mlxsw_sp_nexthop *nh;
3360
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003361 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3362 neigh_list_node) {
3363 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3364 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3365 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003366}
3367
Ido Schimmel9665b742017-02-08 11:16:42 +01003368static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003369 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003370{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003371 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003372 return;
3373
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003374 nh->rif = rif;
3375 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003376}
3377
3378static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3379{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003380 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003381 return;
3382
3383 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003384 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003385}
3386
Ido Schimmela8c97012017-02-08 11:16:35 +01003387static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3388 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003389{
3390 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003391 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003392 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003393 int err;
3394
Ido Schimmelad178c82017-02-08 11:16:40 +01003395 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003396 return 0;
3397
Jiri Pirko33b13412016-11-10 12:31:04 +01003398	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003399	 * not destroyed before the nexthop entry is finished with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003400 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003401 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003402 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003403 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003404 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003405 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3406 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003407 if (IS_ERR(n))
3408 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003409 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003410 }
3411 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3412 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003413 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3414 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003415 err = -EINVAL;
3416 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003417 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003418 }
Yotam Gigib2157142016-07-05 11:27:51 +02003419
3420 /* If that is the first nexthop connected to that neigh, add to
3421 * nexthop_neighs_list
3422 */
3423 if (list_empty(&neigh_entry->nexthop_list))
3424 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003425 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003426
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003427 nh->neigh_entry = neigh_entry;
3428 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3429 read_lock_bh(&n->lock);
3430 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003431 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003432 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003433 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003434
3435 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003436
3437err_neigh_entry_create:
3438 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003439 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003440}
3441
Ido Schimmela8c97012017-02-08 11:16:35 +01003442static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3443 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003444{
3445 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003446 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003447
Ido Schimmelb8399a12017-02-08 11:16:33 +01003448 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003449 return;
3450 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003451
Ido Schimmel58312122016-12-23 09:32:50 +01003452 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003453 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003454 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003455
3456 /* If that is the last nexthop connected to that neigh, remove from
3457 * nexthop_neighs_list
3458 */
Ido Schimmele58be792017-02-08 11:16:28 +01003459 if (list_empty(&neigh_entry->nexthop_list))
3460 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003461
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003462 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3463 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3464
3465 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003466}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003467
Petr Machata44b0fff2017-11-03 10:03:44 +01003468static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3469{
3470 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3471
3472 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3473}
3474
Petr Machatad97cda52017-11-28 13:17:13 +01003475static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3476 struct mlxsw_sp_nexthop *nh,
3477 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003478{
Petr Machata44b0fff2017-11-03 10:03:44 +01003479 bool removing;
3480
Petr Machata1012b9a2017-09-02 23:49:23 +02003481 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003482 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003483
Petr Machatad97cda52017-11-28 13:17:13 +01003484 nh->ipip_entry = ipip_entry;
3485 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003486 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003487 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003488}
3489
3490static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3491 struct mlxsw_sp_nexthop *nh)
3492{
3493 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3494
3495 if (!ipip_entry)
3496 return;
3497
3498 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003499 nh->ipip_entry = NULL;
3500}
3501
3502static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3503 const struct fib_nh *fib_nh,
3504 enum mlxsw_sp_ipip_type *p_ipipt)
3505{
3506 struct net_device *dev = fib_nh->nh_dev;
3507
3508 return dev &&
3509 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3510 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3511}
3512
Petr Machata35225e42017-09-02 23:49:22 +02003513static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3514 struct mlxsw_sp_nexthop *nh)
3515{
3516 switch (nh->type) {
3517 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3518 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3519 mlxsw_sp_nexthop_rif_fini(nh);
3520 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003521 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003522 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003523 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3524 break;
Petr Machata35225e42017-09-02 23:49:22 +02003525 }
3526}
3527
3528static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3529 struct mlxsw_sp_nexthop *nh,
3530 struct fib_nh *fib_nh)
3531{
Petr Machatad97cda52017-11-28 13:17:13 +01003532 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003533 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003534 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003535 struct mlxsw_sp_rif *rif;
3536 int err;
3537
Petr Machatad97cda52017-11-28 13:17:13 +01003538 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3539 if (ipip_entry) {
3540 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3541 if (ipip_ops->can_offload(mlxsw_sp, dev,
3542 MLXSW_SP_L3_PROTO_IPV4)) {
3543 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3544 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3545 return 0;
3546 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003547 }
3548
Petr Machata35225e42017-09-02 23:49:22 +02003549 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3550 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3551 if (!rif)
3552 return 0;
3553
3554 mlxsw_sp_nexthop_rif_init(nh, rif);
3555 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3556 if (err)
3557 goto err_neigh_init;
3558
3559 return 0;
3560
3561err_neigh_init:
3562 mlxsw_sp_nexthop_rif_fini(nh);
3563 return err;
3564}
3565
3566static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3567 struct mlxsw_sp_nexthop *nh)
3568{
3569 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3570}
3571
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003572static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3573 struct mlxsw_sp_nexthop_group *nh_grp,
3574 struct mlxsw_sp_nexthop *nh,
3575 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003576{
3577 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003578 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003579 int err;
3580
3581 nh->nh_grp = nh_grp;
3582 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003583#ifdef CONFIG_IP_ROUTE_MULTIPATH
3584 nh->nh_weight = fib_nh->nh_weight;
3585#else
3586 nh->nh_weight = 1;
3587#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003588 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003589 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3590 if (err)
3591 return err;
3592
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003593 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003594 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3595
Ido Schimmel97989ee2017-03-10 08:53:38 +01003596 if (!dev)
3597 return 0;
3598
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003599 in_dev = __in_dev_get_rtnl(dev);
3600 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3601 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3602 return 0;
3603
Petr Machata35225e42017-09-02 23:49:22 +02003604 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003605 if (err)
3606 goto err_nexthop_neigh_init;
3607
3608 return 0;
3609
3610err_nexthop_neigh_init:
3611 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3612 return err;
3613}
3614
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003615static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3616 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003617{
Petr Machata35225e42017-09-02 23:49:22 +02003618 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003619 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003620 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003621 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003622}
3623
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003624static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3625 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003626{
3627 struct mlxsw_sp_nexthop_key key;
3628 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003629
Ido Schimmel9011b672017-05-16 19:38:25 +02003630 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003631 return;
3632
3633 key.fib_nh = fib_nh;
3634 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3635 if (WARN_ON_ONCE(!nh))
3636 return;
3637
Ido Schimmelad178c82017-02-08 11:16:40 +01003638 switch (event) {
3639 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003640 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003641 break;
3642 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003643 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003644 break;
3645 }
3646
3647 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3648}
3649
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003650static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3651 struct mlxsw_sp_rif *rif)
3652{
3653 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003654 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003655
3656 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003657 switch (nh->type) {
3658 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3659 removing = false;
3660 break;
3661 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3662 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3663 break;
3664 default:
3665 WARN_ON(1);
3666 continue;
3667 }
3668
3669 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003670 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3671 }
3672}
3673
Petr Machata09dbf622017-11-28 13:17:14 +01003674static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3675 struct mlxsw_sp_rif *old_rif,
3676 struct mlxsw_sp_rif *new_rif)
3677{
3678 struct mlxsw_sp_nexthop *nh;
3679
3680 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3681 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3682 nh->rif = new_rif;
3683 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3684}
3685
Ido Schimmel9665b742017-02-08 11:16:42 +01003686static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003687 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003688{
3689 struct mlxsw_sp_nexthop *nh, *tmp;
3690
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003691 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003692 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003693 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3694 }
3695}
3696
Petr Machata9b014512017-09-02 23:49:20 +02003697static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3698 const struct fib_info *fi)
3699{
Petr Machata1012b9a2017-09-02 23:49:23 +02003700 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3701 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003702}
3703
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003704static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003705mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003706{
3707 struct mlxsw_sp_nexthop_group *nh_grp;
3708 struct mlxsw_sp_nexthop *nh;
3709 struct fib_nh *fib_nh;
3710 size_t alloc_size;
3711 int i;
3712 int err;
3713
3714 alloc_size = sizeof(*nh_grp) +
3715 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3716 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3717 if (!nh_grp)
3718 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003719 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003720 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003721 nh_grp->neigh_tbl = &arp_tbl;
3722
Petr Machata9b014512017-09-02 23:49:20 +02003723 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003724 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003725 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003726 for (i = 0; i < nh_grp->count; i++) {
3727 nh = &nh_grp->nexthops[i];
3728 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003729 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003730 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003731 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003732 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003733 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3734 if (err)
3735 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003736 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3737 return nh_grp;
3738
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003739err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003740err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003741 for (i--; i >= 0; i--) {
3742 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003743 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003744 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003745 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003746 kfree(nh_grp);
3747 return ERR_PTR(err);
3748}
3749
3750static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003751mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3752 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003753{
3754 struct mlxsw_sp_nexthop *nh;
3755 int i;
3756
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003757 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003758 for (i = 0; i < nh_grp->count; i++) {
3759 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003760 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003761 }
Ido Schimmel58312122016-12-23 09:32:50 +01003762 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3763 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003764 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003765 kfree(nh_grp);
3766}
3767
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003768static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3769 struct mlxsw_sp_fib_entry *fib_entry,
3770 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003771{
3772 struct mlxsw_sp_nexthop_group *nh_grp;
3773
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003774 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003775 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003776 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003777 if (IS_ERR(nh_grp))
3778 return PTR_ERR(nh_grp);
3779 }
3780 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3781 fib_entry->nh_group = nh_grp;
3782 return 0;
3783}
3784
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003785static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3786 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003787{
3788 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3789
3790 list_del(&fib_entry->nexthop_group_node);
3791 if (!list_empty(&nh_grp->fib_list))
3792 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003793 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003794}
3795
Ido Schimmel013b20f2017-02-08 11:16:36 +01003796static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003797mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3798{
3799 struct mlxsw_sp_fib4_entry *fib4_entry;
3800
3801 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3802 common);
3803 return !fib4_entry->tos;
3804}
3805
3806static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003807mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3808{
3809 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3810
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003811 switch (fib_entry->fib_node->fib->proto) {
3812 case MLXSW_SP_L3_PROTO_IPV4:
3813 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3814 return false;
3815 break;
3816 case MLXSW_SP_L3_PROTO_IPV6:
3817 break;
3818 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003819
Ido Schimmel013b20f2017-02-08 11:16:36 +01003820 switch (fib_entry->type) {
3821 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3822 return !!nh_group->adj_index_valid;
3823 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003824 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003825 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3826 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003827 default:
3828 return false;
3829 }
3830}
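
/* Offload decision in short: IPv4 entries with a non-zero TOS are never
 * offloaded; remote (gateway) entries additionally need a valid adjacency
 * index, local entries need a RIF, and IP-in-IP decap entries are always
 * considered offloadable. Any other entry type is reported as not offloaded.
 */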
3831
Ido Schimmel428b8512017-08-03 13:28:28 +02003832static struct mlxsw_sp_nexthop *
3833mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3834 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3835{
3836 int i;
3837
3838 for (i = 0; i < nh_grp->count; i++) {
3839 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
David Ahern8d1c8022018-04-17 17:33:26 -07003840 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003841
David Ahern5e670d82018-04-17 17:33:14 -07003842 if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
Ido Schimmel428b8512017-08-03 13:28:28 +02003843 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
David Ahern5e670d82018-04-17 17:33:14 -07003844 &rt->fib6_nh.nh_gw))
Ido Schimmel428b8512017-08-03 13:28:28 +02003845 return nh;
3846 continue;
3847 }
3848
3849 return NULL;
3850}
3851
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003852static void
3853mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3854{
3855 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3856 int i;
3857
Petr Machata4607f6d2017-09-02 23:49:25 +02003858 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3859 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003860 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3861 return;
3862 }
3863
3864 for (i = 0; i < nh_grp->count; i++) {
3865 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3866
3867 if (nh->offloaded)
3868 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3869 else
3870 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3871 }
3872}
3873
3874static void
3875mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3876{
3877 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3878 int i;
3879
Ido Schimmeld1c95af2018-02-17 00:30:44 +01003880 if (!list_is_singular(&nh_grp->fib_list))
3881 return;
3882
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003883 for (i = 0; i < nh_grp->count; i++) {
3884 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3885
3886 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3887 }
3888}
3889
Ido Schimmel428b8512017-08-03 13:28:28 +02003890static void
3891mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3892{
3893 struct mlxsw_sp_fib6_entry *fib6_entry;
3894 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3895
3896 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3897 common);
3898
3899 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3900 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
David Ahern5e670d82018-04-17 17:33:14 -07003901 list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003902 return;
3903 }
3904
3905 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3906 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3907 struct mlxsw_sp_nexthop *nh;
3908
3909 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3910 if (nh && nh->offloaded)
David Ahern5e670d82018-04-17 17:33:14 -07003911 mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003912 else
David Ahern5e670d82018-04-17 17:33:14 -07003913 mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003914 }
3915}
3916
3917static void
3918mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3919{
3920 struct mlxsw_sp_fib6_entry *fib6_entry;
3921 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3922
3923 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3924 common);
3925 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern8d1c8022018-04-17 17:33:26 -07003926 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003927
David Ahern5e670d82018-04-17 17:33:14 -07003928 rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003929 }
3930}
3931
Ido Schimmel013b20f2017-02-08 11:16:36 +01003932static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3933{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003934 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003935 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003936 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003937 break;
3938 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003939 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3940 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003941 }
3942}
3943
3944static void
3945mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3946{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003947 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003948 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003949 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003950 break;
3951 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003952 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3953 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003954 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003955}
3956
3957static void
3958mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3959 enum mlxsw_reg_ralue_op op, int err)
3960{
3961 switch (op) {
3962 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003963 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3964 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3965 if (err)
3966 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003967 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003968 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003969 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003970 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3971 return;
3972 default:
3973 return;
3974 }
3975}
3976
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003977static void
3978mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3979 const struct mlxsw_sp_fib_entry *fib_entry,
3980 enum mlxsw_reg_ralue_op op)
3981{
3982 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3983 enum mlxsw_reg_ralxx_protocol proto;
3984 u32 *p_dip;
3985
3986 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3987
3988 switch (fib->proto) {
3989 case MLXSW_SP_L3_PROTO_IPV4:
3990 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3991 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3992 fib_entry->fib_node->key.prefix_len,
3993 *p_dip);
3994 break;
3995 case MLXSW_SP_L3_PROTO_IPV6:
3996 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3997 fib_entry->fib_node->key.prefix_len,
3998 fib_entry->fib_node->key.addr);
3999 break;
4000 }
4001}
4002
4003static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4004 struct mlxsw_sp_fib_entry *fib_entry,
4005 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004006{
4007 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004008 enum mlxsw_reg_ralue_trap_action trap_action;
4009 u16 trap_id = 0;
4010 u32 adjacency_index = 0;
4011 u16 ecmp_size = 0;
4012
4013 /* In case the nexthop group adjacency index is valid, use it
4014	 * with the provided ECMP size. Otherwise, set up a trap and pass
4015	 * traffic to the kernel.
4016 */
Ido Schimmel4b411472017-02-08 11:16:37 +01004017 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004018 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4019 adjacency_index = fib_entry->nh_group->adj_index;
4020 ecmp_size = fib_entry->nh_group->ecmp_size;
4021 } else {
4022 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4023 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4024 }
4025
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004026 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004027 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4028 adjacency_index, ecmp_size);
4029 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4030}
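
/* A remote entry is therefore programmed in one of two ways: with a valid
 * adjacency block the RALUE action is NOP and carries (adj_index, ecmp_size),
 * letting the hardware hash flows across the block; otherwise the action is
 * TRAP with MLXSW_TRAP_ID_RTR_INGRESS0 so packets are punted and the kernel's
 * FIB forwards them. The local entry handler below follows the same pattern
 * with a RIF instead of an adjacency block.
 */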
4031
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004032static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4033 struct mlxsw_sp_fib_entry *fib_entry,
4034 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004035{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004036 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004037 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004038 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01004039 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004040 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004041
4042 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4043 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004044 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004045 } else {
4046 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4047 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4048 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004049
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004050 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004051 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4052 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004053 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4054}
4055
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004056static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4057 struct mlxsw_sp_fib_entry *fib_entry,
4058 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004059{
4060 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02004061
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004062 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004063 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4064 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4065}
4066
Petr Machata4607f6d2017-09-02 23:49:25 +02004067static int
4068mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4069 struct mlxsw_sp_fib_entry *fib_entry,
4070 enum mlxsw_reg_ralue_op op)
4071{
4072 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4073 const struct mlxsw_sp_ipip_ops *ipip_ops;
4074
4075 if (WARN_ON(!ipip_entry))
4076 return -EINVAL;
4077
4078 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4079 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4080 fib_entry->decap.tunnel_index);
4081}
4082
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004083static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4084 struct mlxsw_sp_fib_entry *fib_entry,
4085 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004086{
4087 switch (fib_entry->type) {
4088 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004089 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004090 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004091 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004092 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004093 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02004094 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4095 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4096 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004097 }
4098 return -EINVAL;
4099}
4100
4101static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4102 struct mlxsw_sp_fib_entry *fib_entry,
4103 enum mlxsw_reg_ralue_op op)
4104{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004105 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004106
Ido Schimmel013b20f2017-02-08 11:16:36 +01004107 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004108
Ido Schimmel013b20f2017-02-08 11:16:36 +01004109 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004110}
4111
4112static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4113 struct mlxsw_sp_fib_entry *fib_entry)
4114{
Jiri Pirko7146da32016-09-01 10:37:41 +02004115 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4116 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004117}
4118
4119static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4120 struct mlxsw_sp_fib_entry *fib_entry)
4121{
4122 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4123 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4124}
4125
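/* Derive the device action for an IPv4 route from the kernel route type:
 * local routes that terminate an offloaded IP-in-IP tunnel are set up for
 * decapsulation, other local and broadcast routes are trapped to the CPU,
 * unreachable/blackhole/prohibit routes use the lower-priority local
 * action, and unicast routes are programmed as remote entries when the
 * FIB info indicates a gateway and as local entries otherwise.
 */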
Jiri Pirko61c503f2016-07-04 08:23:11 +02004126static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004127mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4128 const struct fib_entry_notifier_info *fen_info,
4129 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004130{
Petr Machata4607f6d2017-09-02 23:49:25 +02004131 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4132 struct net_device *dev = fen_info->fi->fib_dev;
4133 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004134 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004135
Ido Schimmel97989ee2017-03-10 08:53:38 +01004136 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004137 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004138 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4139 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004140 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004141 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4142 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4143 fib_entry,
4144 ipip_entry);
4145 }
4146 /* fall through */
4147 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004148 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4149 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004150 case RTN_UNREACHABLE: /* fall through */
4151 case RTN_BLACKHOLE: /* fall through */
4152 case RTN_PROHIBIT:
4153 /* Packets hitting these routes need to be trapped, but
4154	 * with a lower priority than packets directed
4155 * at the host, so use action type local instead of trap.
4156 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004157 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004158 return 0;
4159 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004160 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004161 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004162 else
4163 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004164 return 0;
4165 default:
4166 return -EINVAL;
4167 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004168}
4169
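/* Allocate an IPv4 FIB entry for a notified route: resolve its type, take
 * a reference on a nexthop group matching the route's fib_info and record
 * the table ID, TOS, type and priority so that later notifications can be
 * matched against this entry.
 */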
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004170static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004171mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4172 struct mlxsw_sp_fib_node *fib_node,
4173 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004174{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004175 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004176 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004177 int err;
4178
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004179 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4180 if (!fib4_entry)
4181 return ERR_PTR(-ENOMEM);
4182 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004183
4184 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4185 if (err)
4186 goto err_fib4_entry_type_set;
4187
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004188 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004189 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004190 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004191
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004192 fib4_entry->prio = fen_info->fi->fib_priority;
4193 fib4_entry->tb_id = fen_info->tb_id;
4194 fib4_entry->type = fen_info->type;
4195 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004196
4197 fib_entry->fib_node = fib_node;
4198
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004199 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004200
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004201err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004202err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004203 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004204 return ERR_PTR(err);
4205}
4206
4207static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004208 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004209{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004210 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004211 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004212}
4213
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004214static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004215mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4216 const struct fib_entry_notifier_info *fen_info)
4217{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004218 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004219 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004220 struct mlxsw_sp_fib *fib;
4221 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004222
Ido Schimmel160e22a2017-07-18 10:10:20 +02004223 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4224 if (!vr)
4225 return NULL;
4226 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4227
4228 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4229 sizeof(fen_info->dst),
4230 fen_info->dst_len);
4231 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004232 return NULL;
4233
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004234 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4235 if (fib4_entry->tb_id == fen_info->tb_id &&
4236 fib4_entry->tos == fen_info->tos &&
4237 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004238 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4239 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004240 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004241 }
4242 }
4243
4244 return NULL;
4245}
4246
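/* A FIB node represents a unique {prefix, prefix length} within a FIB and
 * is keyed in the per-FIB rhashtable below. Each node carries a list of
 * FIB entries sorted by preference; only the first entry in the list is
 * actually programmed in the device.
 */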
4247static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4248 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4249 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4250 .key_len = sizeof(struct mlxsw_sp_fib_key),
4251 .automatic_shrinking = true,
4252};
4253
4254static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4255 struct mlxsw_sp_fib_node *fib_node)
4256{
4257 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4258 mlxsw_sp_fib_ht_params);
4259}
4260
4261static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4262 struct mlxsw_sp_fib_node *fib_node)
4263{
4264 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4265 mlxsw_sp_fib_ht_params);
4266}
4267
4268static struct mlxsw_sp_fib_node *
4269mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4270 size_t addr_len, unsigned char prefix_len)
4271{
4272 struct mlxsw_sp_fib_key key;
4273
4274 memset(&key, 0, sizeof(key));
4275 memcpy(key.addr, addr, addr_len);
4276 key.prefix_len = prefix_len;
4277 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4278}
4279
4280static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004281mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004282 size_t addr_len, unsigned char prefix_len)
4283{
4284 struct mlxsw_sp_fib_node *fib_node;
4285
4286 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4287 if (!fib_node)
4288 return NULL;
4289
4290 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004291 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004292 memcpy(fib_node->key.addr, addr, addr_len);
4293 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004294
4295 return fib_node;
4296}
4297
4298static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4299{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004300 list_del(&fib_node->list);
4301 WARN_ON(!list_empty(&fib_node->entry_list));
4302 kfree(fib_node);
4303}
4304
4305static bool
4306mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4307 const struct mlxsw_sp_fib_entry *fib_entry)
4308{
4309 return list_first_entry(&fib_node->entry_list,
4310 struct mlxsw_sp_fib_entry, list) == fib_entry;
4311}
4312
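/* Each FIB node accounts for its prefix length in the LPM tree bound to
 * its FIB. The first node with a given prefix length may require fetching
 * a tree whose prefix usage includes that length and rebinding the virtual
 * routers to it; the unlink path below similarly tries to shrink the tree
 * once a prefix length is no longer used.
 */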
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004313static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004314 struct mlxsw_sp_fib_node *fib_node)
4315{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004316 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004317 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004318 struct mlxsw_sp_lpm_tree *lpm_tree;
4319 int err;
4320
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004321 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4322 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4323 goto out;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004324
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004325 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4326 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004327 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4328 fib->proto);
4329 if (IS_ERR(lpm_tree))
4330 return PTR_ERR(lpm_tree);
4331
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004332 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4333 if (err)
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004334 goto err_lpm_tree_replace;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004335
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004336out:
4337 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004338 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004339
4340err_lpm_tree_replace:
4341 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4342 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004343}
4344
4345static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004346 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004347{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004348 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4349 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004350 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004351 int err;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004352
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004353 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004354 return;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004355 /* Try to construct a new LPM tree from the current prefix usage
4356 * minus the unused one. If we fail, continue using the old one.
Ido Schimmel4fd00312018-01-22 09:17:40 +01004357 */
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004358 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4359 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4360 fib_node->key.prefix_len);
4361 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4362 fib->proto);
4363 if (IS_ERR(lpm_tree))
4364 return;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004365
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004366 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4367 if (err)
4368 goto err_lpm_tree_replace;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004369
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004370 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004371
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004372err_lpm_tree_replace:
4373 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004374}
4375
Ido Schimmel76610eb2017-03-10 08:53:41 +01004376static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4377 struct mlxsw_sp_fib_node *fib_node,
4378 struct mlxsw_sp_fib *fib)
4379{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004380 int err;
4381
4382 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4383 if (err)
4384 return err;
4385 fib_node->fib = fib;
4386
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004387 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004388 if (err)
4389 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004390
Ido Schimmel76610eb2017-03-10 08:53:41 +01004391 return 0;
4392
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004393err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004394 fib_node->fib = NULL;
4395 mlxsw_sp_fib_node_remove(fib, fib_node);
4396 return err;
4397}
4398
4399static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4400 struct mlxsw_sp_fib_node *fib_node)
4401{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004402 struct mlxsw_sp_fib *fib = fib_node->fib;
4403
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004404 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004405 fib_node->fib = NULL;
4406 mlxsw_sp_fib_node_remove(fib, fib_node);
4407}
4408
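/* Look up the FIB node for {table, prefix, prefix length}, creating it
 * (and taking a reference on the virtual router) if it does not exist.
 * mlxsw_sp_fib_node_put() below releases the node and the virtual router
 * reference once the node's entry list becomes empty.
 */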
Ido Schimmel9aecce12017-02-09 10:28:42 +01004409static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004410mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4411 size_t addr_len, unsigned char prefix_len,
4412 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004413{
4414 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004415 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004416 struct mlxsw_sp_vr *vr;
4417 int err;
4418
David Ahernf8fa9b42017-10-18 09:56:56 -07004419 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004420 if (IS_ERR(vr))
4421 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004422 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004423
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004424 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004425 if (fib_node)
4426 return fib_node;
4427
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004428 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004429 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004430 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004431 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004432 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004433
Ido Schimmel76610eb2017-03-10 08:53:41 +01004434 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4435 if (err)
4436 goto err_fib_node_init;
4437
Ido Schimmel9aecce12017-02-09 10:28:42 +01004438 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004439
Ido Schimmel76610eb2017-03-10 08:53:41 +01004440err_fib_node_init:
4441 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004442err_fib_node_create:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004443 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004444 return ERR_PTR(err);
4445}
4446
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004447static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4448 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004449{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004450 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004451
Ido Schimmel9aecce12017-02-09 10:28:42 +01004452 if (!list_empty(&fib_node->entry_list))
4453 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004454 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004455 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004456 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004457}
4458
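/* IPv4 entries within a FIB node are kept sorted, most preferred first:
 * higher table IDs and TOS values come first and, within the same table
 * and TOS, entries are ordered by priority. The helpers below locate the
 * position a new entry should take in the list and implement the
 * replace/append semantics signalled by the FIB notifier.
 */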
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004459static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004460mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004461 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004462{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004463 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004464
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004465 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4466 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004467 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004468 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004469 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004470 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004471 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004472 if (fib4_entry->prio >= new4_entry->prio ||
4473 fib4_entry->tos < new4_entry->tos)
4474 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004475 }
4476
4477 return NULL;
4478}
4479
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004480static int
4481mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4482 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004483{
4484 struct mlxsw_sp_fib_node *fib_node;
4485
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004486 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004487 return -EINVAL;
4488
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004489 fib_node = fib4_entry->common.fib_node;
4490 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4491 common.list) {
4492 if (fib4_entry->tb_id != new4_entry->tb_id ||
4493 fib4_entry->tos != new4_entry->tos ||
4494 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004495 break;
4496 }
4497
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004498 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004499 return 0;
4500}
4501
Ido Schimmel9aecce12017-02-09 10:28:42 +01004502static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004503mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004504 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004505{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004506 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004507 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004508
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004509 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004510
Ido Schimmel4283bce2017-02-09 10:28:43 +01004511 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004512 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4513 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004514 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004515
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004516	/* Insert the new entry before the one being replaced, so that
4517	 * the latter can be removed later.
4518 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004519 if (fib4_entry) {
4520 list_add_tail(&new4_entry->common.list,
4521 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004522 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004523 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004524
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004525 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4526 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004527 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004528 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004529 }
4530
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004531 if (fib4_entry)
4532 list_add(&new4_entry->common.list,
4533 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004534 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004535 list_add(&new4_entry->common.list,
4536 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004537 }
4538
4539 return 0;
4540}
4541
4542static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004543mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004544{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004545 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004546}
4547
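/* Only the first entry in a FIB node's list is reflected in the device.
 * When a new first entry is added it overwrites the previous one (rather
 * than deleting it first) to avoid packet loss, and when the first entry
 * is deleted the next one is promoted in the same way.
 */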
Ido Schimmel80c238f2017-07-18 10:10:29 +02004548static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4549 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004550{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004551 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4552
Ido Schimmel9aecce12017-02-09 10:28:42 +01004553 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4554 return 0;
4555
4556 /* To prevent packet loss, overwrite the previously offloaded
4557 * entry.
4558 */
4559 if (!list_is_singular(&fib_node->entry_list)) {
4560 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4561 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4562
4563 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4564 }
4565
4566 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4567}
4568
Ido Schimmel80c238f2017-07-18 10:10:29 +02004569static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4570 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004571{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004572 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4573
Ido Schimmel9aecce12017-02-09 10:28:42 +01004574 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4575 return;
4576
4577 /* Promote the next entry by overwriting the deleted entry */
4578 if (!list_is_singular(&fib_node->entry_list)) {
4579 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4580 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4581
4582 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4583 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4584 return;
4585 }
4586
4587 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4588}
4589
4590static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004591 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004592 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004593{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004594 int err;
4595
Ido Schimmel9efbee62017-07-18 10:10:28 +02004596 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004597 if (err)
4598 return err;
4599
Ido Schimmel80c238f2017-07-18 10:10:29 +02004600 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004601 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004602 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004603
Ido Schimmel9aecce12017-02-09 10:28:42 +01004604 return 0;
4605
Ido Schimmel80c238f2017-07-18 10:10:29 +02004606err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004607 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004608 return err;
4609}
4610
4611static void
4612mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004613 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004614{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004615 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004616 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004617
4618 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4619 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004620}
4621
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004622static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004623 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004624 bool replace)
4625{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004626 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4627 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004628
4629 if (!replace)
4630 return;
4631
4632	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004633 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004634
4635 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4636 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004637 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004638}
4639
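/* Handler for IPv4 route notifications: get or create the FIB node for the
 * destination prefix, create a FIB entry for the route, link it into the
 * node's entry list according to the replace/append flags and program the
 * device. On failure everything is unrolled and the node reference is
 * dropped.
 */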
Ido Schimmel9aecce12017-02-09 10:28:42 +01004640static int
4641mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004642 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004643 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004644{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004645 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004646 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004647 int err;
4648
Ido Schimmel9011b672017-05-16 19:38:25 +02004649 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004650 return 0;
4651
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004652 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4653 &fen_info->dst, sizeof(fen_info->dst),
4654 fen_info->dst_len,
4655 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004656 if (IS_ERR(fib_node)) {
4657 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4658 return PTR_ERR(fib_node);
4659 }
4660
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004661 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4662 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004663 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004664 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004665 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004666 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004667
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004668 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004669 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004670 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004671 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4672 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004673 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004674
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004675 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004676
Jiri Pirko61c503f2016-07-04 08:23:11 +02004677 return 0;
4678
Ido Schimmel9aecce12017-02-09 10:28:42 +01004679err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004680 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004681err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004682 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004683 return err;
4684}
4685
Jiri Pirko37956d72016-10-20 16:05:43 +02004686static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4687 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004688{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004689 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004690 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004691
Ido Schimmel9011b672017-05-16 19:38:25 +02004692 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004693 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004694
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004695 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4696 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004697 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004698 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004699
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004700 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4701 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004702 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004703}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004704
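/* IPv6 routes are reflected into the device from fib6_info notifications.
 * Routes that the device does not need in the forwarding path (link-local
 * destinations, multicast routes and cloned routes) are skipped here.
 */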
David Ahern8d1c8022018-04-17 17:33:26 -07004705static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004706{
4707	/* Packets with a link-local destination IP arriving at the router
4708	 * are trapped to the CPU, so there is no need to program specific routes
4709 * for them.
4710 */
David Ahern93c2fb22018-04-18 15:38:59 -07004711 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
Ido Schimmel428b8512017-08-03 13:28:28 +02004712 return true;
4713
4714 /* Multicast routes aren't supported, so ignore them. Neighbour
4715 * Discovery packets are specifically trapped.
4716 */
David Ahern93c2fb22018-04-18 15:38:59 -07004717 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
Ido Schimmel428b8512017-08-03 13:28:28 +02004718 return true;
4719
4720 /* Cloned routes are irrelevant in the forwarding path. */
David Ahern93c2fb22018-04-18 15:38:59 -07004721 if (rt->fib6_flags & RTF_CACHE)
Ido Schimmel428b8512017-08-03 13:28:28 +02004722 return true;
4723
4724 return false;
4725}
4726
David Ahern8d1c8022018-04-17 17:33:26 -07004727static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004728{
4729 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4730
4731 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4732 if (!mlxsw_sp_rt6)
4733 return ERR_PTR(-ENOMEM);
4734
4735	/* In case of route replace, the replaced route is deleted with
4736	 * no notification. Take a reference to prevent accessing freed
4737 * memory.
4738 */
4739 mlxsw_sp_rt6->rt = rt;
David Ahern8d1c8022018-04-17 17:33:26 -07004740 fib6_info_hold(rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004741
4742 return mlxsw_sp_rt6;
4743}
4744
4745#if IS_ENABLED(CONFIG_IPV6)
David Ahern8d1c8022018-04-17 17:33:26 -07004746static void mlxsw_sp_rt6_release(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004747{
David Ahern8d1c8022018-04-17 17:33:26 -07004748 fib6_info_release(rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004749}
4750#else
David Ahern8d1c8022018-04-17 17:33:26 -07004751static void mlxsw_sp_rt6_release(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004752{
4753}
4754#endif
4755
4756static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4757{
4758 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4759 kfree(mlxsw_sp_rt6);
4760}
4761
David Ahern8d1c8022018-04-17 17:33:26 -07004762static struct fib6_info *
Ido Schimmel428b8512017-08-03 13:28:28 +02004763mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4764{
4765 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4766 list)->rt;
4767}
4768
4769static struct mlxsw_sp_fib6_entry *
4770mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel53b562d2018-06-15 16:23:36 +03004771 const struct fib6_info *nrt, bool append)
Ido Schimmel428b8512017-08-03 13:28:28 +02004772{
4773 struct mlxsw_sp_fib6_entry *fib6_entry;
4774
Ido Schimmel53b562d2018-06-15 16:23:36 +03004775 if (!append)
Ido Schimmel428b8512017-08-03 13:28:28 +02004776 return NULL;
4777
4778 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07004779 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02004780
4781 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4782 * virtual router.
4783 */
David Ahern93c2fb22018-04-18 15:38:59 -07004784 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02004785 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07004786 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02004787 break;
David Ahern93c2fb22018-04-18 15:38:59 -07004788 if (rt->fib6_metric < nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004789 continue;
Ido Schimmel53b562d2018-06-15 16:23:36 +03004790 if (rt->fib6_metric == nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004791 return fib6_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07004792 if (rt->fib6_metric > nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004793 break;
4794 }
4795
4796 return NULL;
4797}
4798
4799static struct mlxsw_sp_rt6 *
4800mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07004801 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004802{
4803 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4804
4805 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4806 if (mlxsw_sp_rt6->rt == rt)
4807 return mlxsw_sp_rt6;
4808 }
4809
4810 return NULL;
4811}
4812
Petr Machata8f28a302017-09-02 23:49:24 +02004813static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07004814 const struct fib6_info *rt,
Petr Machata8f28a302017-09-02 23:49:24 +02004815 enum mlxsw_sp_ipip_type *ret)
4816{
David Ahern5e670d82018-04-17 17:33:14 -07004817 return rt->fib6_nh.nh_dev &&
4818 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
Petr Machata8f28a302017-09-02 23:49:24 +02004819}
4820
Petr Machata35225e42017-09-02 23:49:22 +02004821static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4822 struct mlxsw_sp_nexthop_group *nh_grp,
4823 struct mlxsw_sp_nexthop *nh,
David Ahern8d1c8022018-04-17 17:33:26 -07004824 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004825{
Petr Machatad97cda52017-11-28 13:17:13 +01004826 const struct mlxsw_sp_ipip_ops *ipip_ops;
4827 struct mlxsw_sp_ipip_entry *ipip_entry;
David Ahern5e670d82018-04-17 17:33:14 -07004828 struct net_device *dev = rt->fib6_nh.nh_dev;
Ido Schimmel428b8512017-08-03 13:28:28 +02004829 struct mlxsw_sp_rif *rif;
4830 int err;
4831
Petr Machatad97cda52017-11-28 13:17:13 +01004832 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4833 if (ipip_entry) {
4834 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4835 if (ipip_ops->can_offload(mlxsw_sp, dev,
4836 MLXSW_SP_L3_PROTO_IPV6)) {
4837 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4838 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4839 return 0;
4840 }
Petr Machata8f28a302017-09-02 23:49:24 +02004841 }
4842
Petr Machata35225e42017-09-02 23:49:22 +02004843 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004844 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4845 if (!rif)
4846 return 0;
4847 mlxsw_sp_nexthop_rif_init(nh, rif);
4848
4849 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4850 if (err)
4851 goto err_nexthop_neigh_init;
4852
4853 return 0;
4854
4855err_nexthop_neigh_init:
4856 mlxsw_sp_nexthop_rif_fini(nh);
4857 return err;
4858}
4859
Petr Machata35225e42017-09-02 23:49:22 +02004860static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4861 struct mlxsw_sp_nexthop *nh)
4862{
4863 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4864}
4865
4866static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4867 struct mlxsw_sp_nexthop_group *nh_grp,
4868 struct mlxsw_sp_nexthop *nh,
David Ahern8d1c8022018-04-17 17:33:26 -07004869 const struct fib6_info *rt)
Petr Machata35225e42017-09-02 23:49:22 +02004870{
David Ahern5e670d82018-04-17 17:33:14 -07004871 struct net_device *dev = rt->fib6_nh.nh_dev;
Petr Machata35225e42017-09-02 23:49:22 +02004872
4873 nh->nh_grp = nh_grp;
David Ahern5e670d82018-04-17 17:33:14 -07004874 nh->nh_weight = rt->fib6_nh.nh_weight;
4875 memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004876 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004877
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004878 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4879
Petr Machata35225e42017-09-02 23:49:22 +02004880 if (!dev)
4881 return 0;
4882 nh->ifindex = dev->ifindex;
4883
4884 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4885}
4886
Ido Schimmel428b8512017-08-03 13:28:28 +02004887static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4888 struct mlxsw_sp_nexthop *nh)
4889{
Petr Machata35225e42017-09-02 23:49:22 +02004890 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004891 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004892 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004893}
4894
Petr Machataf6050ee2017-09-02 23:49:21 +02004895static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07004896 const struct fib6_info *rt)
Petr Machataf6050ee2017-09-02 23:49:21 +02004897{
David Ahern93c2fb22018-04-18 15:38:59 -07004898 return rt->fib6_flags & RTF_GATEWAY ||
Petr Machata8f28a302017-09-02 23:49:24 +02004899 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004900}
4901
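/* Build a nexthop group from all the routes (rt6 siblings) of an IPv6
 * multipath entry. Groups are registered for lookup so that entries with
 * an identical set of nexthops can share a single group, and the group is
 * refreshed so the device's adjacency entries match its nexthops.
 */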
Ido Schimmel428b8512017-08-03 13:28:28 +02004902static struct mlxsw_sp_nexthop_group *
4903mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4904 struct mlxsw_sp_fib6_entry *fib6_entry)
4905{
4906 struct mlxsw_sp_nexthop_group *nh_grp;
4907 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4908 struct mlxsw_sp_nexthop *nh;
4909 size_t alloc_size;
4910 int i = 0;
4911 int err;
4912
4913 alloc_size = sizeof(*nh_grp) +
4914 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4915 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4916 if (!nh_grp)
4917 return ERR_PTR(-ENOMEM);
4918 INIT_LIST_HEAD(&nh_grp->fib_list);
4919#if IS_ENABLED(CONFIG_IPV6)
4920 nh_grp->neigh_tbl = &nd_tbl;
4921#endif
4922 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4923 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004924 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004925 nh_grp->count = fib6_entry->nrt6;
4926 for (i = 0; i < nh_grp->count; i++) {
David Ahern8d1c8022018-04-17 17:33:26 -07004927 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004928
4929 nh = &nh_grp->nexthops[i];
4930 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4931 if (err)
4932 goto err_nexthop6_init;
4933 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4934 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004935
4936 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4937 if (err)
4938 goto err_nexthop_group_insert;
4939
Ido Schimmel428b8512017-08-03 13:28:28 +02004940 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4941 return nh_grp;
4942
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004943err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004944err_nexthop6_init:
4945 for (i--; i >= 0; i--) {
4946 nh = &nh_grp->nexthops[i];
4947 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4948 }
4949 kfree(nh_grp);
4950 return ERR_PTR(err);
4951}
4952
4953static void
4954mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4955 struct mlxsw_sp_nexthop_group *nh_grp)
4956{
4957 struct mlxsw_sp_nexthop *nh;
4958 int i = nh_grp->count;
4959
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004960 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004961 for (i--; i >= 0; i--) {
4962 nh = &nh_grp->nexthops[i];
4963 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4964 }
4965 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4966 WARN_ON(nh_grp->adj_index_valid);
4967 kfree(nh_grp);
4968}
4969
4970static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4971 struct mlxsw_sp_fib6_entry *fib6_entry)
4972{
4973 struct mlxsw_sp_nexthop_group *nh_grp;
4974
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004975 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4976 if (!nh_grp) {
4977 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4978 if (IS_ERR(nh_grp))
4979 return PTR_ERR(nh_grp);
4980 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004981
4982 list_add_tail(&fib6_entry->common.nexthop_group_node,
4983 &nh_grp->fib_list);
4984 fib6_entry->common.nh_group = nh_grp;
4985
4986 return 0;
4987}
4988
4989static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4990 struct mlxsw_sp_fib_entry *fib_entry)
4991{
4992 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4993
4994 list_del(&fib_entry->nexthop_group_node);
4995 if (!list_empty(&nh_grp->fib_list))
4996 return;
4997 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4998}
4999
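/* Called after routes were added to or removed from a multipath entry:
 * move the entry to a nexthop group matching its updated set of routes,
 * rewrite the device entry so it points at the new group and destroy the
 * old group if this entry was its last user.
 */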
5000static int
5001mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5002 struct mlxsw_sp_fib6_entry *fib6_entry)
5003{
5004 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5005 int err;
5006
5007 fib6_entry->common.nh_group = NULL;
5008 list_del(&fib6_entry->common.nexthop_group_node);
5009
5010 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5011 if (err)
5012 goto err_nexthop6_group_get;
5013
5014	/* In case this entry is offloaded, the adjacency index
5015 * currently associated with it in the device's table is that
5016 * of the old group. Start using the new one instead.
5017 */
5018 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5019 if (err)
5020 goto err_fib_node_entry_add;
5021
5022 if (list_empty(&old_nh_grp->fib_list))
5023 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5024
5025 return 0;
5026
5027err_fib_node_entry_add:
5028 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5029err_nexthop6_group_get:
5030 list_add_tail(&fib6_entry->common.nexthop_group_node,
5031 &old_nh_grp->fib_list);
5032 fib6_entry->common.nh_group = old_nh_grp;
5033 return err;
5034}
5035
5036static int
5037mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5038 struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005039 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005040{
5041 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5042 int err;
5043
5044 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5045 if (IS_ERR(mlxsw_sp_rt6))
5046 return PTR_ERR(mlxsw_sp_rt6);
5047
5048 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5049 fib6_entry->nrt6++;
5050
5051 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5052 if (err)
5053 goto err_nexthop6_group_update;
5054
5055 return 0;
5056
5057err_nexthop6_group_update:
5058 fib6_entry->nrt6--;
5059 list_del(&mlxsw_sp_rt6->list);
5060 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5061 return err;
5062}
5063
5064static void
5065mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5066 struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005067 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005068{
5069 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5070
5071 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5072 if (WARN_ON(!mlxsw_sp_rt6))
5073 return;
5074
5075 fib6_entry->nrt6--;
5076 list_del(&mlxsw_sp_rt6->list);
5077 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5078 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5079}
5080
Petr Machataf6050ee2017-09-02 23:49:21 +02005081static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5082 struct mlxsw_sp_fib_entry *fib_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005083 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005084{
5085 /* Packets hitting RTF_REJECT routes need to be discarded by the
5086 * stack. We can rely on their destination device not having a
5087 * RIF (it's the loopback device) and can thus use action type
5088 * local, which will cause them to be trapped with a lower
5089 * priority than packets that need to be locally received.
5090 */
David Ahern93c2fb22018-04-18 15:38:59 -07005091 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02005092 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
David Ahern93c2fb22018-04-18 15:38:59 -07005093 else if (rt->fib6_flags & RTF_REJECT)
Ido Schimmel428b8512017-08-03 13:28:28 +02005094 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02005095 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02005096 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5097 else
5098 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5099}
5100
5101static void
5102mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5103{
5104 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5105
5106 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5107 list) {
5108 fib6_entry->nrt6--;
5109 list_del(&mlxsw_sp_rt6->list);
5110 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5111 }
5112}
5113
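/* Create an IPv6 FIB entry from the first route of a (possibly multipath)
 * set. Additional sibling routes are attached via
 * mlxsw_sp_fib6_entry_nexthop_add() when append notifications arrive,
 * which regenerates the entry's nexthop group.
 */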
5114static struct mlxsw_sp_fib6_entry *
5115mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5116 struct mlxsw_sp_fib_node *fib_node,
David Ahern8d1c8022018-04-17 17:33:26 -07005117 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005118{
5119 struct mlxsw_sp_fib6_entry *fib6_entry;
5120 struct mlxsw_sp_fib_entry *fib_entry;
5121 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5122 int err;
5123
5124 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5125 if (!fib6_entry)
5126 return ERR_PTR(-ENOMEM);
5127 fib_entry = &fib6_entry->common;
5128
5129 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5130 if (IS_ERR(mlxsw_sp_rt6)) {
5131 err = PTR_ERR(mlxsw_sp_rt6);
5132 goto err_rt6_create;
5133 }
5134
Petr Machataf6050ee2017-09-02 23:49:21 +02005135 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005136
5137 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5138 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5139 fib6_entry->nrt6 = 1;
5140 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5141 if (err)
5142 goto err_nexthop6_group_get;
5143
5144 fib_entry->fib_node = fib_node;
5145
5146 return fib6_entry;
5147
5148err_nexthop6_group_get:
5149 list_del(&mlxsw_sp_rt6->list);
5150 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5151err_rt6_create:
5152 kfree(fib6_entry);
5153 return ERR_PTR(err);
5154}
5155
5156static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5157 struct mlxsw_sp_fib6_entry *fib6_entry)
5158{
5159 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5160 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5161 WARN_ON(fib6_entry->nrt6);
5162 kfree(fib6_entry);
5163}
5164
5165static struct mlxsw_sp_fib6_entry *
5166mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
David Ahern8d1c8022018-04-17 17:33:26 -07005167 const struct fib6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005168{
Ido Schimmelce45bded2018-06-15 16:23:37 +03005169 struct mlxsw_sp_fib6_entry *fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005170
5171 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005172 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005173
David Ahern93c2fb22018-04-18 15:38:59 -07005174 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005175 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07005176 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005177 break;
Ido Schimmelce45bded2018-06-15 16:23:37 +03005178 if (replace && rt->fib6_metric == nrt->fib6_metric)
5179 return fib6_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07005180 if (rt->fib6_metric > nrt->fib6_metric)
Ido Schimmelce45bded2018-06-15 16:23:37 +03005181 return fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005182 }
5183
Ido Schimmelce45bded2018-06-15 16:23:37 +03005184 return NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005185}
5186
5187static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005188mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5189 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005190{
5191 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
David Ahern8d1c8022018-04-17 17:33:26 -07005192 struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005193 struct mlxsw_sp_fib6_entry *fib6_entry;
5194
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005195 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5196
5197 if (replace && WARN_ON(!fib6_entry))
5198 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005199
5200 if (fib6_entry) {
5201 list_add_tail(&new6_entry->common.list,
5202 &fib6_entry->common.list);
5203 } else {
5204 struct mlxsw_sp_fib6_entry *last;
5205
5206 list_for_each_entry(last, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005207 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
Ido Schimmel428b8512017-08-03 13:28:28 +02005208
David Ahern93c2fb22018-04-18 15:38:59 -07005209 if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005210 break;
5211 fib6_entry = last;
5212 }
5213
5214 if (fib6_entry)
5215 list_add(&new6_entry->common.list,
5216 &fib6_entry->common.list);
5217 else
5218 list_add(&new6_entry->common.list,
5219 &fib_node->entry_list);
5220 }
5221
5222 return 0;
5223}
5224
5225static void
5226mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5227{
5228 list_del(&fib6_entry->common.list);
5229}
5230
5231static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005232 struct mlxsw_sp_fib6_entry *fib6_entry,
5233 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005234{
5235 int err;
5236
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005237 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005238 if (err)
5239 return err;
5240
5241 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5242 if (err)
5243 goto err_fib_node_entry_add;
5244
5245 return 0;
5246
5247err_fib_node_entry_add:
5248 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5249 return err;
5250}
5251
5252static void
5253mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5254 struct mlxsw_sp_fib6_entry *fib6_entry)
5255{
5256 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5257 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5258}
5259
5260static struct mlxsw_sp_fib6_entry *
5261mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07005262 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005263{
5264 struct mlxsw_sp_fib6_entry *fib6_entry;
5265 struct mlxsw_sp_fib_node *fib_node;
5266 struct mlxsw_sp_fib *fib;
5267 struct mlxsw_sp_vr *vr;
5268
David Ahern93c2fb22018-04-18 15:38:59 -07005269 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
Ido Schimmel428b8512017-08-03 13:28:28 +02005270 if (!vr)
5271 return NULL;
5272 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5273
David Ahern93c2fb22018-04-18 15:38:59 -07005274 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5275 sizeof(rt->fib6_dst.addr),
5276 rt->fib6_dst.plen);
Ido Schimmel428b8512017-08-03 13:28:28 +02005277 if (!fib_node)
5278 return NULL;
5279
5280 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005281 struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005282
David Ahern93c2fb22018-04-18 15:38:59 -07005283 if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5284 rt->fib6_metric == iter_rt->fib6_metric &&
Ido Schimmel428b8512017-08-03 13:28:28 +02005285 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5286 return fib6_entry;
5287 }
5288
5289 return NULL;
5290}
5291
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005292static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5293 struct mlxsw_sp_fib6_entry *fib6_entry,
5294 bool replace)
5295{
5296 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5297 struct mlxsw_sp_fib6_entry *replaced;
5298
5299 if (!replace)
5300 return;
5301
5302 replaced = list_next_entry(fib6_entry, common.list);
5303
5304 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5305 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5306 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5307}
5308
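/* Reflect an IPv6 route addition in hardware. Routes are ignored while the
 * router is in the aborted state, and source-specific routes are rejected
 * as they cannot be offloaded. Otherwise the route is either appended to an
 * existing multipath entry or a new entry is created for it.
 */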
Ido Schimmel428b8512017-08-03 13:28:28 +02005309static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel53b562d2018-06-15 16:23:36 +03005310 struct fib6_info *rt, bool replace,
5311 bool append)
Ido Schimmel428b8512017-08-03 13:28:28 +02005312{
5313 struct mlxsw_sp_fib6_entry *fib6_entry;
5314 struct mlxsw_sp_fib_node *fib_node;
5315 int err;
5316
5317 if (mlxsw_sp->router->aborted)
5318 return 0;
5319
David Ahern93c2fb22018-04-18 15:38:59 -07005320 if (rt->fib6_src.plen)
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005321 return -EINVAL;
5322
Ido Schimmel428b8512017-08-03 13:28:28 +02005323 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5324 return 0;
5325
David Ahern93c2fb22018-04-18 15:38:59 -07005326 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5327 &rt->fib6_dst.addr,
5328 sizeof(rt->fib6_dst.addr),
5329 rt->fib6_dst.plen,
Ido Schimmel428b8512017-08-03 13:28:28 +02005330 MLXSW_SP_L3_PROTO_IPV6);
5331 if (IS_ERR(fib_node))
5332 return PTR_ERR(fib_node);
5333
5334	/* Before creating a new entry, try to append the route to an existing
5335 * multipath entry.
5336 */
Ido Schimmel53b562d2018-06-15 16:23:36 +03005337 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
Ido Schimmel428b8512017-08-03 13:28:28 +02005338 if (fib6_entry) {
5339 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5340 if (err)
5341 goto err_fib6_entry_nexthop_add;
5342 return 0;
5343 }
5344
Ido Schimmel53b562d2018-06-15 16:23:36 +03005345 /* We received an append event, yet did not find any route to
5346 * append to.
5347 */
5348 if (WARN_ON(append)) {
5349 err = -EINVAL;
5350 goto err_fib6_entry_append;
5351 }
5352
Ido Schimmel428b8512017-08-03 13:28:28 +02005353 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5354 if (IS_ERR(fib6_entry)) {
5355 err = PTR_ERR(fib6_entry);
5356 goto err_fib6_entry_create;
5357 }
5358
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005359 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005360 if (err)
5361 goto err_fib6_node_entry_link;
5362
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005363 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5364
Ido Schimmel428b8512017-08-03 13:28:28 +02005365 return 0;
5366
5367err_fib6_node_entry_link:
5368 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5369err_fib6_entry_create:
Ido Schimmel53b562d2018-06-15 16:23:36 +03005370err_fib6_entry_append:
Ido Schimmel428b8512017-08-03 13:28:28 +02005371err_fib6_entry_nexthop_add:
5372 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5373 return err;
5374}
5375
5376static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07005377 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005378{
5379 struct mlxsw_sp_fib6_entry *fib6_entry;
5380 struct mlxsw_sp_fib_node *fib_node;
5381
5382 if (mlxsw_sp->router->aborted)
5383 return;
5384
5385 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5386 return;
5387
5388 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5389 if (WARN_ON(!fib6_entry))
5390 return;
5391
5392	/* If the route is part of a multipath entry, but not the last one
5393 * removed, then only reduce its nexthop group.
5394 */
5395 if (!list_is_singular(&fib6_entry->rt6_list)) {
5396 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5397 return;
5398 }
5399
5400 fib_node = fib6_entry->common.fib_node;
5401
5402 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5403 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5404 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5405}
5406
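/* Install the per-protocol "abort" configuration: allocate an LPM tree
 * (RALTA), set its prefix structure (RALST), bind every virtual router to
 * it (RALTB) and add a default route with an ip2me action (RALUE), so that
 * all packets are trapped to the CPU and routed by the kernel instead of
 * the ASIC.
 */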
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005407static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5408 enum mlxsw_reg_ralxx_protocol proto,
5409 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005410{
5411 char ralta_pl[MLXSW_REG_RALTA_LEN];
5412 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005413 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005414
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005415 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005416 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5417 if (err)
5418 return err;
5419
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005420 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005421 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5422 if (err)
5423 return err;
5424
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005425 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005426 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005427 char raltb_pl[MLXSW_REG_RALTB_LEN];
5428 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005429
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005430 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005431 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5432 raltb_pl);
5433 if (err)
5434 return err;
5435
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005436 mlxsw_reg_ralue_pack(ralue_pl, proto,
5437 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005438 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5439 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5440 ralue_pl);
5441 if (err)
5442 return err;
5443 }
5444
5445 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005446}
5447
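/* Multicast route notifications carry either RTNL_FAMILY_IPMR (IPv4) or
 * RTNL_FAMILY_IP6MR (IPv6); map the family to the matching multicast
 * routing table of the virtual router.
 */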
Yuval Mintzeb35da02018-03-26 15:01:42 +03005448static struct mlxsw_sp_mr_table *
5449mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5450{
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005451 if (family == RTNL_FAMILY_IPMR)
Yuval Mintzeb35da02018-03-26 15:01:42 +03005452 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005453 else
5454 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
Yuval Mintzeb35da02018-03-26 15:01:42 +03005455}
5456
Yotam Gigid42b0962017-09-27 08:23:20 +02005457static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5458 struct mfc_entry_notifier_info *men_info,
5459 bool replace)
5460{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005461 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005462 struct mlxsw_sp_vr *vr;
5463
5464 if (mlxsw_sp->router->aborted)
5465 return 0;
5466
David Ahernf8fa9b42017-10-18 09:56:56 -07005467 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005468 if (IS_ERR(vr))
5469 return PTR_ERR(vr);
5470
Yuval Mintzeb35da02018-03-26 15:01:42 +03005471 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5472 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
Yotam Gigid42b0962017-09-27 08:23:20 +02005473}
5474
5475static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5476 struct mfc_entry_notifier_info *men_info)
5477{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005478 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005479 struct mlxsw_sp_vr *vr;
5480
5481 if (mlxsw_sp->router->aborted)
5482 return;
5483
5484 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5485 if (WARN_ON(!vr))
5486 return;
5487
Yuval Mintzeb35da02018-03-26 15:01:42 +03005488 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5489 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005490 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005491}
5492
5493static int
5494mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5495 struct vif_entry_notifier_info *ven_info)
5496{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005497 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005498 struct mlxsw_sp_rif *rif;
5499 struct mlxsw_sp_vr *vr;
5500
5501 if (mlxsw_sp->router->aborted)
5502 return 0;
5503
David Ahernf8fa9b42017-10-18 09:56:56 -07005504 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005505 if (IS_ERR(vr))
5506 return PTR_ERR(vr);
5507
Yuval Mintzeb35da02018-03-26 15:01:42 +03005508 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
Yotam Gigid42b0962017-09-27 08:23:20 +02005509 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
Yuval Mintzeb35da02018-03-26 15:01:42 +03005510 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
Yotam Gigid42b0962017-09-27 08:23:20 +02005511 ven_info->vif_index,
5512 ven_info->vif_flags, rif);
5513}
5514
5515static void
5516mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5517 struct vif_entry_notifier_info *ven_info)
5518{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005519 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005520 struct mlxsw_sp_vr *vr;
5521
5522 if (mlxsw_sp->router->aborted)
5523 return;
5524
5525 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5526 if (WARN_ON(!vr))
5527 return;
5528
Yuval Mintzeb35da02018-03-26 15:01:42 +03005529 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5530 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005531 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005532}
5533
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005534static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5535{
5536 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5537 int err;
5538
5539 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5540 MLXSW_SP_LPM_TREE_MIN);
5541 if (err)
5542 return err;
5543
Yotam Gigid42b0962017-09-27 08:23:20 +02005544	/* The multicast router code does not need an abort trap, since by
5545	 * default, packets that do not match any route are trapped to the CPU.
5546 */
5547
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005548 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5549 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5550 MLXSW_SP_LPM_TREE_MIN + 1);
5551}
5552
Ido Schimmel9aecce12017-02-09 10:28:42 +01005553static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5554 struct mlxsw_sp_fib_node *fib_node)
5555{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005556 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005557
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005558 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5559 common.list) {
5560 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005561
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005562 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5563 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005564 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005565 /* Break when entry list is empty and node was freed.
5566 * Otherwise, we'll access freed memory in the next
5567 * iteration.
5568 */
5569 if (do_break)
5570 break;
5571 }
5572}
5573
Ido Schimmel428b8512017-08-03 13:28:28 +02005574static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5575 struct mlxsw_sp_fib_node *fib_node)
5576{
5577 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5578
5579 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5580 common.list) {
5581 bool do_break = &tmp->common.list == &fib_node->entry_list;
5582
5583 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5584 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5585 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5586 if (do_break)
5587 break;
5588 }
5589}
5590
Ido Schimmel9aecce12017-02-09 10:28:42 +01005591static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5592 struct mlxsw_sp_fib_node *fib_node)
5593{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005594 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005595 case MLXSW_SP_L3_PROTO_IPV4:
5596 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5597 break;
5598 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005599 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005600 break;
5601 }
5602}
5603
Ido Schimmel76610eb2017-03-10 08:53:41 +01005604static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5605 struct mlxsw_sp_vr *vr,
5606 enum mlxsw_sp_l3proto proto)
5607{
5608 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5609 struct mlxsw_sp_fib_node *fib_node, *tmp;
5610
5611 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5612 bool do_break = &tmp->list == &fib->node_list;
5613
5614 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5615 if (do_break)
5616 break;
5617 }
5618}
5619
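/* Flush the FIBs of every virtual router that is in use: multicast tables
 * first, then IPv4 and finally IPv6, unless flushing IPv4 released the last
 * reference on the virtual router.
 */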
Ido Schimmelac571de2016-11-14 11:26:32 +01005620static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005621{
Yuval Mintz9742f862018-03-26 15:01:40 +03005622 int i, j;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005623
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005624 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005625 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005626
Ido Schimmel76610eb2017-03-10 08:53:41 +01005627 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005628 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005629
Yuval Mintz9742f862018-03-26 15:01:40 +03005630 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5631 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005632 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005633
5634 /* If virtual router was only used for IPv4, then it's no
5635 * longer used.
5636 */
5637 if (!mlxsw_sp_vr_is_used(vr))
5638 continue;
5639 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005640 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005641}
5642
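/* Abort FIB offload: flush all offloaded routes, mark the router as aborted
 * so that further routes are not programmed into the device, and install
 * the traps that make the kernel take over forwarding.
 */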
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005643static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005644{
5645 int err;
5646
Ido Schimmel9011b672017-05-16 19:38:25 +02005647 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005648 return;
5649 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005650 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005651 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005652 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5653 if (err)
5654 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5655}
5656
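/* FIB notifications are delivered in atomic context. The notifier copies
 * the notifier info into this work item, takes references on the underlying
 * objects, and defers the actual processing to process context, where it
 * runs under RTNL.
 */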
Ido Schimmel30572242016-12-03 16:45:01 +01005657struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005658 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005659 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005660 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005661 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005662 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005663 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005664 struct mfc_entry_notifier_info men_info;
5665 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005666 };
Ido Schimmel30572242016-12-03 16:45:01 +01005667 struct mlxsw_sp *mlxsw_sp;
5668 unsigned long event;
5669};
5670
Ido Schimmel66a57632017-08-03 13:28:26 +02005671static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005672{
Ido Schimmel30572242016-12-03 16:45:01 +01005673 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005674 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005675 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005676 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005677 int err;
5678
Ido Schimmel30572242016-12-03 16:45:01 +01005679 /* Protect internal structures from changes */
5680 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005681 mlxsw_sp_span_respin(mlxsw_sp);
5682
Ido Schimmel30572242016-12-03 16:45:01 +01005683 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005684 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005685 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005686 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005687 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005688 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5689 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005690 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005691 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005692 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005693 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005694 break;
5695 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005696 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5697 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005698 break;
David Ahern1f279232017-10-27 17:37:14 -07005699 case FIB_EVENT_RULE_ADD:
5700		/* If we get here, a rule was added that we do not support,
5701		 * so just abort FIB offload.
5702 */
5703 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005704 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005705 case FIB_EVENT_NH_ADD: /* fall through */
5706 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005707 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5708 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005709 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5710 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005711 }
Ido Schimmel30572242016-12-03 16:45:01 +01005712 rtnl_unlock();
5713 kfree(fib_work);
5714}
5715
Ido Schimmel66a57632017-08-03 13:28:26 +02005716static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5717{
Ido Schimmel583419f2017-08-03 13:28:27 +02005718 struct mlxsw_sp_fib_event_work *fib_work =
5719 container_of(work, struct mlxsw_sp_fib_event_work, work);
5720 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel53b562d2018-06-15 16:23:36 +03005721 bool replace, append;
Ido Schimmel428b8512017-08-03 13:28:28 +02005722 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005723
5724 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005725 mlxsw_sp_span_respin(mlxsw_sp);
5726
Ido Schimmel583419f2017-08-03 13:28:27 +02005727 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005728 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
David Ahern5a15a1b2018-05-21 10:26:52 -07005729 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005730 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005731 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel53b562d2018-06-15 16:23:36 +03005732 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
Ido Schimmel428b8512017-08-03 13:28:28 +02005733 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel53b562d2018-06-15 16:23:36 +03005734 fib_work->fen6_info.rt, replace,
5735 append);
Ido Schimmel428b8512017-08-03 13:28:28 +02005736 if (err)
5737 mlxsw_sp_router_fib_abort(mlxsw_sp);
5738 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5739 break;
5740 case FIB_EVENT_ENTRY_DEL:
5741 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5742 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5743 break;
David Ahern1f279232017-10-27 17:37:14 -07005744 case FIB_EVENT_RULE_ADD:
5745		/* If we get here, a rule was added that we do not support,
5746		 * so just abort FIB offload.
5747 */
5748 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005749 break;
5750 }
5751 rtnl_unlock();
5752 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005753}
5754
Yotam Gigid42b0962017-09-27 08:23:20 +02005755static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5756{
5757 struct mlxsw_sp_fib_event_work *fib_work =
5758 container_of(work, struct mlxsw_sp_fib_event_work, work);
5759 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005760 bool replace;
5761 int err;
5762
5763 rtnl_lock();
5764 switch (fib_work->event) {
5765 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5766 case FIB_EVENT_ENTRY_ADD:
5767 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5768
5769 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5770 replace);
5771 if (err)
5772 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005773 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005774 break;
5775 case FIB_EVENT_ENTRY_DEL:
5776 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005777 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005778 break;
5779 case FIB_EVENT_VIF_ADD:
5780 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5781 &fib_work->ven_info);
5782 if (err)
5783 mlxsw_sp_router_fib_abort(mlxsw_sp);
5784 dev_put(fib_work->ven_info.dev);
5785 break;
5786 case FIB_EVENT_VIF_DEL:
5787 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5788 &fib_work->ven_info);
5789 dev_put(fib_work->ven_info.dev);
5790 break;
David Ahern1f279232017-10-27 17:37:14 -07005791 case FIB_EVENT_RULE_ADD:
5792		/* If we get here, a rule was added that we do not support,
5793		 * so just abort FIB offload.
5794 */
5795 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005796 break;
5797 }
5798 rtnl_unlock();
5799 kfree(fib_work);
5800}
5801
Ido Schimmel66a57632017-08-03 13:28:26 +02005802static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5803 struct fib_notifier_info *info)
5804{
David Ahern3c75f9b2017-10-18 15:01:38 -07005805 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005806 struct fib_nh_notifier_info *fnh_info;
5807
Ido Schimmel66a57632017-08-03 13:28:26 +02005808 switch (fib_work->event) {
5809 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5810 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5811 case FIB_EVENT_ENTRY_ADD: /* fall through */
5812 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005813 fen_info = container_of(info, struct fib_entry_notifier_info,
5814 info);
5815 fib_work->fen_info = *fen_info;
5816		/* Take a reference on the fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005817		 * freed while the work is queued. Release it afterwards.
5818 */
5819 fib_info_hold(fib_work->fen_info.fi);
5820 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005821 case FIB_EVENT_NH_ADD: /* fall through */
5822 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005823 fnh_info = container_of(info, struct fib_nh_notifier_info,
5824 info);
5825 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005826 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5827 break;
5828 }
5829}
5830
5831static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5832 struct fib_notifier_info *info)
5833{
David Ahern3c75f9b2017-10-18 15:01:38 -07005834 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005835
Ido Schimmel583419f2017-08-03 13:28:27 +02005836 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005837 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
David Ahern5a15a1b2018-05-21 10:26:52 -07005838 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005839 case FIB_EVENT_ENTRY_ADD: /* fall through */
5840 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005841 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5842 info);
5843 fib_work->fen6_info = *fen6_info;
David Ahern8d1c8022018-04-17 17:33:26 -07005844 fib6_info_hold(fib_work->fen6_info.rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005845 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005846 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005847}
5848
Yotam Gigid42b0962017-09-27 08:23:20 +02005849static void
5850mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5851 struct fib_notifier_info *info)
5852{
5853 switch (fib_work->event) {
5854 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5855 case FIB_EVENT_ENTRY_ADD: /* fall through */
5856 case FIB_EVENT_ENTRY_DEL:
5857 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
Yuval Mintz8c13af22018-03-26 15:01:36 +03005858 mr_cache_hold(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005859 break;
5860 case FIB_EVENT_VIF_ADD: /* fall through */
5861 case FIB_EVENT_VIF_DEL:
5862 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5863 dev_hold(fib_work->ven_info.dev);
5864 break;
David Ahern1f279232017-10-27 17:37:14 -07005865 }
5866}
5867
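/* FIB rules are not offloaded. Only the default rules (and l3mdev rules)
 * installed by the kernel are tolerated; any other rule results in
 * -EOPNOTSUPP, which is either reported back through extack or leads to a
 * FIB abort from the work item.
 */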
5868static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5869 struct fib_notifier_info *info,
5870 struct mlxsw_sp *mlxsw_sp)
5871{
5872 struct netlink_ext_ack *extack = info->extack;
5873 struct fib_rule_notifier_info *fr_info;
5874 struct fib_rule *rule;
5875 int err = 0;
5876
5877 /* nothing to do at the moment */
5878 if (event == FIB_EVENT_RULE_DEL)
5879 return 0;
5880
5881 if (mlxsw_sp->router->aborted)
5882 return 0;
5883
5884 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5885 rule = fr_info->rule;
5886
5887 switch (info->family) {
5888 case AF_INET:
5889 if (!fib4_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005890 err = -EOPNOTSUPP;
David Ahern1f279232017-10-27 17:37:14 -07005891 break;
5892 case AF_INET6:
5893 if (!fib6_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005894 err = -EOPNOTSUPP;
David Ahern1f279232017-10-27 17:37:14 -07005895 break;
5896 case RTNL_FAMILY_IPMR:
5897 if (!ipmr_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005898 err = -EOPNOTSUPP;
Yotam Gigid42b0962017-09-27 08:23:20 +02005899 break;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005900 case RTNL_FAMILY_IP6MR:
5901 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005902 err = -EOPNOTSUPP;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005903 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005904 }
David Ahern1f279232017-10-27 17:37:14 -07005905
5906 if (err < 0)
Ido Schimmel62901822018-05-02 10:17:34 +03005907 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
David Ahern1f279232017-10-27 17:37:14 -07005908
5909 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005910}
5911
Ido Schimmel30572242016-12-03 16:45:01 +01005912/* Called with rcu_read_lock() */
5913static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5914 unsigned long event, void *ptr)
5915{
Ido Schimmel30572242016-12-03 16:45:01 +01005916 struct mlxsw_sp_fib_event_work *fib_work;
5917 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005918 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005919 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005920
Ido Schimmel8e29f972017-09-15 15:31:07 +02005921 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005922 (info->family != AF_INET && info->family != AF_INET6 &&
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005923 info->family != RTNL_FAMILY_IPMR &&
5924 info->family != RTNL_FAMILY_IP6MR))
Ido Schimmel30572242016-12-03 16:45:01 +01005925 return NOTIFY_DONE;
5926
David Ahern1f279232017-10-27 17:37:14 -07005927 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5928
5929 switch (event) {
5930 case FIB_EVENT_RULE_ADD: /* fall through */
5931 case FIB_EVENT_RULE_DEL:
5932 err = mlxsw_sp_router_fib_rule_event(event, info,
5933 router->mlxsw_sp);
Ido Schimmel62901822018-05-02 10:17:34 +03005934 if (!err || info->extack)
5935 return notifier_from_errno(err);
Ido Schimmel50d10712018-05-02 10:17:35 +03005936 break;
5937 case FIB_EVENT_ENTRY_ADD:
5938 if (router->aborted) {
5939 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
5940 return notifier_from_errno(-EINVAL);
5941 }
5942 break;
David Ahern1f279232017-10-27 17:37:14 -07005943 }
5944
Ido Schimmel30572242016-12-03 16:45:01 +01005945 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5946 if (WARN_ON(!fib_work))
5947 return NOTIFY_BAD;
5948
Ido Schimmel7e39d112017-05-16 19:38:28 +02005949 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005950 fib_work->event = event;
5951
Ido Schimmel66a57632017-08-03 13:28:26 +02005952 switch (info->family) {
5953 case AF_INET:
5954 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5955 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005956 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005957 case AF_INET6:
5958 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5959 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005960 break;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005961 case RTNL_FAMILY_IP6MR:
Yotam Gigid42b0962017-09-27 08:23:20 +02005962 case RTNL_FAMILY_IPMR:
5963 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5964 mlxsw_sp_router_fibmr_event(fib_work, info);
5965 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005966 }
5967
Ido Schimmela0e47612017-02-06 16:20:10 +01005968 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005969
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005970 return NOTIFY_DONE;
5971}
5972
Petr Machata0c412922018-06-25 10:48:15 +03005973struct mlxsw_sp_rif *
Ido Schimmel4724ba562017-03-10 08:53:39 +01005974mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5975 const struct net_device *dev)
5976{
5977 int i;
5978
5979 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005980 if (mlxsw_sp->router->rifs[i] &&
5981 mlxsw_sp->router->rifs[i]->dev == dev)
5982 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005983
5984 return NULL;
5985}
5986
5987static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5988{
5989 char ritr_pl[MLXSW_REG_RITR_LEN];
5990 int err;
5991
5992 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5993 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5994 if (WARN_ON_ONCE(err))
5995 return err;
5996
5997 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5998 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5999}
6000
6001static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006002 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006003{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006004 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6005 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6006 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006007}
6008
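/* Decide whether an address event should change the RIF configuration: a
 * RIF is created on the first address (NETDEV_UP with no RIF yet) and
 * destroyed on NETDEV_DOWN only once both the IPv4 and IPv6 address lists
 * of the netdevice are empty and the netdevice is not an L3 slave.
 */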
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006009static bool
6010mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6011 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006012{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006013 struct inet6_dev *inet6_dev;
6014 bool addr_list_empty = true;
6015 struct in_device *idev;
6016
Ido Schimmel4724ba562017-03-10 08:53:39 +01006017 switch (event) {
6018 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02006019 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006020 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006021 idev = __in_dev_get_rtnl(dev);
6022 if (idev && idev->ifa_list)
6023 addr_list_empty = false;
6024
6025 inet6_dev = __in6_dev_get(dev);
6026 if (addr_list_empty && inet6_dev &&
6027 !list_empty(&inet6_dev->addr_list))
6028 addr_list_empty = false;
6029
6030 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006031 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006032 return true;
6033 /* It is possible we already removed the RIF ourselves
6034 * if it was assigned to a netdev that is now a bridge
6035 * or LAG slave.
6036 */
6037 return false;
6038 }
6039
6040 return false;
6041}
6042
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006043static enum mlxsw_sp_rif_type
6044mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6045 const struct net_device *dev)
6046{
6047 enum mlxsw_sp_fid_type type;
6048
Petr Machata6ddb7422017-09-02 23:49:19 +02006049 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6050 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6051
6052 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006053 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6054 type = MLXSW_SP_FID_TYPE_8021Q;
6055 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6056 type = MLXSW_SP_FID_TYPE_8021Q;
6057 else if (netif_is_bridge_master(dev))
6058 type = MLXSW_SP_FID_TYPE_8021D;
6059 else
6060 type = MLXSW_SP_FID_TYPE_RFID;
6061
6062 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6063}
6064
Ido Schimmelde5ed992017-06-04 16:53:40 +02006065static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006066{
6067 int i;
6068
Ido Schimmelde5ed992017-06-04 16:53:40 +02006069 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6070 if (!mlxsw_sp->router->rifs[i]) {
6071 *p_rif_index = i;
6072 return 0;
6073 }
6074 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006075
Ido Schimmelde5ed992017-06-04 16:53:40 +02006076 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006077}
6078
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006079static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6080 u16 vr_id,
6081 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006082{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006083 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006084
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006085 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006086 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006087 return NULL;
6088
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006089 INIT_LIST_HEAD(&rif->nexthop_list);
6090 INIT_LIST_HEAD(&rif->neigh_list);
6091 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6092 rif->mtu = l3_dev->mtu;
6093 rif->vr_id = vr_id;
6094 rif->dev = l3_dev;
6095 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006096
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006097 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006098}
6099
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006100struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6101 u16 rif_index)
6102{
6103 return mlxsw_sp->router->rifs[rif_index];
6104}
6105
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006106u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6107{
6108 return rif->rif_index;
6109}
6110
Petr Machata92107cf2017-09-02 23:49:28 +02006111u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6112{
6113 return lb_rif->common.rif_index;
6114}
6115
6116u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6117{
6118 return lb_rif->ul_vr_id;
6119}
6120
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006121int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6122{
6123 return rif->dev->ifindex;
6124}
6125
Yotam Gigi91e4d592017-09-19 10:00:19 +02006126const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6127{
6128 return rif->dev;
6129}
6130
Petr Machataa28b1eb2018-06-25 10:48:16 +03006131struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
6132{
6133 return rif->fid;
6134}
6135
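/* Create a router interface (RIF) for a netdevice: derive the RIF type and
 * its ops from the netdevice, bind the RIF to a virtual router based on the
 * l3mdev FIB table, allocate a free RIF index, take a reference on the FID
 * where applicable and let the type-specific ops program the hardware,
 * before registering the RIF with the multicast routing tables.
 */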
Ido Schimmel4724ba562017-03-10 08:53:39 +01006136static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006137mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006138 const struct mlxsw_sp_rif_params *params,
6139 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006140{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006141 u32 tb_id = l3mdev_fib_table(params->dev);
6142 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02006143 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006144 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006145 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006146 struct mlxsw_sp_vr *vr;
6147 u16 rif_index;
Yuval Mintz9742f862018-03-26 15:01:40 +03006148 int i, err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006149
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006150 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6151 ops = mlxsw_sp->router->rif_ops_arr[type];
6152
David Ahernf8fa9b42017-10-18 09:56:56 -07006153 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006154 if (IS_ERR(vr))
6155 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006156 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006157
Ido Schimmelde5ed992017-06-04 16:53:40 +02006158 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006159 if (err) {
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01006160 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006161 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006162 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006163
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006164 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006165 if (!rif) {
6166 err = -ENOMEM;
6167 goto err_rif_alloc;
6168 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006169 rif->mlxsw_sp = mlxsw_sp;
6170 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006171
Petr Machata010cadf2017-09-02 23:49:18 +02006172 if (ops->fid_get) {
Petr Machata5f15e252018-06-25 10:48:13 +03006173 fid = ops->fid_get(rif, extack);
Petr Machata010cadf2017-09-02 23:49:18 +02006174 if (IS_ERR(fid)) {
6175 err = PTR_ERR(fid);
6176 goto err_fid_get;
6177 }
6178 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006179 }
6180
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006181 if (ops->setup)
6182 ops->setup(rif, params);
6183
6184 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006185 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006186 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006187
Yuval Mintz9742f862018-03-26 15:01:40 +03006188 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6189 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6190 if (err)
6191 goto err_mr_rif_add;
6192 }
Yotam Gigid42b0962017-09-27 08:23:20 +02006193
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006194 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006195 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006196
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006197 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006198
Yotam Gigid42b0962017-09-27 08:23:20 +02006199err_mr_rif_add:
Yuval Mintz9742f862018-03-26 15:01:40 +03006200 for (i--; i >= 0; i--)
6201 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006202 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006203err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006204 if (fid)
6205 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006206err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006207 kfree(rif);
6208err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006209err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006210 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006211 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006212 return ERR_PTR(err);
6213}
6214
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006215void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006216{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006217 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6218 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006219 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006220 struct mlxsw_sp_vr *vr;
Yuval Mintz9742f862018-03-26 15:01:40 +03006221 int i;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006222
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006223 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006224 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006225
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006226 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006227 mlxsw_sp_rif_counters_free(rif);
Yuval Mintz9742f862018-03-26 15:01:40 +03006228 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6229 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006230 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006231 if (fid)
6232 /* Loopback RIFs are not associated with a FID. */
6233 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006234 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006235 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006236 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006237}
6238
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006239static void
6240mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6241 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6242{
6243 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6244
6245 params->vid = mlxsw_sp_port_vlan->vid;
6246 params->lag = mlxsw_sp_port->lagged;
6247 if (params->lag)
6248 params->lag_id = mlxsw_sp_port->lag_id;
6249 else
6250 params->system_port = mlxsw_sp_port->local_port;
6251}
6252
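/* Join a {port, VID} to the router: create a sub-port RIF for the L3 device
 * if one does not exist yet, map the {port, VID} to the RIF's rFID and take
 * the VLAN out of bridge processing (learning disabled, STP state set to
 * forwarding).
 */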
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006253static int
Ido Schimmela1107482017-05-26 08:37:39 +02006254mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006255 struct net_device *l3_dev,
6256 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006257{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006258 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006259 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006260 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006261 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006262 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006263 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006264
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006265 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006266 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006267 struct mlxsw_sp_rif_params params = {
6268 .dev = l3_dev,
6269 };
6270
6271 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006272 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006273 if (IS_ERR(rif))
6274 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006275 }
6276
Ido Schimmela1107482017-05-26 08:37:39 +02006277 /* FID was already created, just take a reference */
Petr Machata5f15e252018-06-25 10:48:13 +03006278 fid = rif->ops->fid_get(rif, extack);
Ido Schimmela1107482017-05-26 08:37:39 +02006279 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6280 if (err)
6281 goto err_fid_port_vid_map;
6282
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006283 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006284 if (err)
6285 goto err_port_vid_learning_set;
6286
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006287 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006288 BR_STATE_FORWARDING);
6289 if (err)
6290 goto err_port_vid_stp_set;
6291
Ido Schimmela1107482017-05-26 08:37:39 +02006292 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006293
Ido Schimmel4724ba562017-03-10 08:53:39 +01006294 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006295
6296err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006297 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006298err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006299 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6300err_fid_port_vid_map:
6301 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006302 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006303}
6304
Ido Schimmela1107482017-05-26 08:37:39 +02006305void
6306mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006307{
Ido Schimmelce95e152017-05-26 08:37:27 +02006308 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006309 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006310 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006311
Ido Schimmela1107482017-05-26 08:37:39 +02006312 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6313 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006314
Ido Schimmela1107482017-05-26 08:37:39 +02006315 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006316 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6317 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006318 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6319	/* If the router port holds the last reference on the rFID, then the
6320 * associated Sub-port RIF will be destroyed.
6321 */
6322 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006323}
6324
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006325static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6326 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006327 unsigned long event, u16 vid,
6328 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006329{
6330 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006331 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006332
Ido Schimmelce95e152017-05-26 08:37:27 +02006333 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006334 if (WARN_ON(!mlxsw_sp_port_vlan))
6335 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006336
6337 switch (event) {
6338 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006339 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006340 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006341 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006342 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006343 break;
6344 }
6345
6346 return 0;
6347}
6348
6349static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006350 unsigned long event,
6351 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006352{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006353 if (netif_is_bridge_port(port_dev) ||
6354 netif_is_lag_port(port_dev) ||
6355 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006356 return 0;
6357
David Ahernf8fa9b42017-10-18 09:56:56 -07006358 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6359 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006360}
6361
6362static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6363 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006364 unsigned long event, u16 vid,
6365 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006366{
6367 struct net_device *port_dev;
6368 struct list_head *iter;
6369 int err;
6370
6371 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6372 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006373 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6374 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006375 event, vid,
6376 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006377 if (err)
6378 return err;
6379 }
6380 }
6381
6382 return 0;
6383}
6384
6385static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006386 unsigned long event,
6387 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006388{
6389 if (netif_is_bridge_port(lag_dev))
6390 return 0;
6391
David Ahernf8fa9b42017-10-18 09:56:56 -07006392 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6393 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006394}
6395
Ido Schimmel4724ba562017-03-10 08:53:39 +01006396static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006397 unsigned long event,
6398 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006399{
6400 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006401 struct mlxsw_sp_rif_params params = {
6402 .dev = l3_dev,
6403 };
Ido Schimmela1107482017-05-26 08:37:39 +02006404 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006405
6406 switch (event) {
6407 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006408 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006409 if (IS_ERR(rif))
6410 return PTR_ERR(rif);
6411 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006412 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006413 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006414 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006415 break;
6416 }
6417
6418 return 0;
6419}
6420
6421static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006422 unsigned long event,
6423 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006424{
6425 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006426 u16 vid = vlan_dev_vlan_id(vlan_dev);
6427
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006428 if (netif_is_bridge_port(vlan_dev))
6429 return 0;
6430
Ido Schimmel4724ba562017-03-10 08:53:39 +01006431 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006432 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006433 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006434 else if (netif_is_lag_master(real_dev))
6435 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006436 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006437 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006438 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006439
6440 return 0;
6441}
6442
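/* Dispatch an address event to the handler matching the netdevice type:
 * physical port, LAG, bridge or VLAN device. Other netdevices are ignored.
 */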
Ido Schimmelb1e45522017-04-30 19:47:14 +03006443static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006444 unsigned long event,
6445 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006446{
6447 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006448 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006449 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006450 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006451 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006452 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006453 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006454 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006455 else
6456 return 0;
6457}
6458
Ido Schimmel4724ba562017-03-10 08:53:39 +01006459int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6460 unsigned long event, void *ptr)
6461{
6462 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6463 struct net_device *dev = ifa->ifa_dev->dev;
6464 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006465 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006466 int err = 0;
6467
David Ahern89d5dd22017-10-18 09:56:55 -07006468 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6469 if (event == NETDEV_UP)
6470 goto out;
6471
6472 mlxsw_sp = mlxsw_sp_lower_get(dev);
6473 if (!mlxsw_sp)
6474 goto out;
6475
6476 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6477 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6478 goto out;
6479
David Ahernf8fa9b42017-10-18 09:56:56 -07006480 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006481out:
6482 return notifier_from_errno(err);
6483}
6484
6485int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6486 unsigned long event, void *ptr)
6487{
6488 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6489 struct net_device *dev = ivi->ivi_dev->dev;
6490 struct mlxsw_sp *mlxsw_sp;
6491 struct mlxsw_sp_rif *rif;
6492 int err = 0;
6493
Ido Schimmel4724ba562017-03-10 08:53:39 +01006494 mlxsw_sp = mlxsw_sp_lower_get(dev);
6495 if (!mlxsw_sp)
6496 goto out;
6497
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006498 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006499 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006500 goto out;
6501
David Ahernf8fa9b42017-10-18 09:56:56 -07006502 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006503out:
6504 return notifier_from_errno(err);
6505}
6506
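/* The inet6addr notifier is called under rcu_read_lock() (see below), so
 * the event is deferred to this work item and handled under RTNL in process
 * context.
 */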
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006507struct mlxsw_sp_inet6addr_event_work {
6508 struct work_struct work;
6509 struct net_device *dev;
6510 unsigned long event;
6511};
6512
6513static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6514{
6515 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6516 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6517 struct net_device *dev = inet6addr_work->dev;
6518 unsigned long event = inet6addr_work->event;
6519 struct mlxsw_sp *mlxsw_sp;
6520 struct mlxsw_sp_rif *rif;
6521
6522 rtnl_lock();
6523 mlxsw_sp = mlxsw_sp_lower_get(dev);
6524 if (!mlxsw_sp)
6525 goto out;
6526
6527 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6528 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6529 goto out;
6530
David Ahernf8fa9b42017-10-18 09:56:56 -07006531 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006532out:
6533 rtnl_unlock();
6534 dev_put(dev);
6535 kfree(inet6addr_work);
6536}
6537
6538/* Called with rcu_read_lock() */
6539int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6540 unsigned long event, void *ptr)
6541{
6542 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6543 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6544 struct net_device *dev = if6->idev->dev;
6545
David Ahern89d5dd22017-10-18 09:56:55 -07006546 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6547 if (event == NETDEV_UP)
6548 return NOTIFY_DONE;
6549
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006550 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6551 return NOTIFY_DONE;
6552
6553 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6554 if (!inet6addr_work)
6555 return NOTIFY_BAD;
6556
6557 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6558 inet6addr_work->dev = dev;
6559 inet6addr_work->event = event;
6560 dev_hold(dev);
6561 mlxsw_core_schedule_work(&inet6addr_work->work);
6562
6563 return NOTIFY_DONE;
6564}
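/* The IPv6 notifier above runs in atomic context (under rcu_read_lock()),
 * so the actual RIF update is deferred to a work item: the allocation uses
 * GFP_ATOMIC, dev_hold() pins the netdev until the work runs, and the work
 * item itself takes RTNL, drops the reference and frees the structure.
 */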
6565
David Ahern89d5dd22017-10-18 09:56:55 -07006566int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6567 unsigned long event, void *ptr)
6568{
6569 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6570 struct net_device *dev = i6vi->i6vi_dev->dev;
6571 struct mlxsw_sp *mlxsw_sp;
6572 struct mlxsw_sp_rif *rif;
6573 int err = 0;
6574
6575 mlxsw_sp = mlxsw_sp_lower_get(dev);
6576 if (!mlxsw_sp)
6577 goto out;
6578
6579 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6580 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6581 goto out;
6582
David Ahernf8fa9b42017-10-18 09:56:56 -07006583 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006584out:
6585 return notifier_from_errno(err);
6586}
6587
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006588static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006589 const char *mac, int mtu)
6590{
6591 char ritr_pl[MLXSW_REG_RITR_LEN];
6592 int err;
6593
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006594 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006595 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6596 if (err)
6597 return err;
6598
6599 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6600 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6601 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6602 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6603}
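/* mlxsw_sp_rif_edit() is a read-modify-write of the RITR register: the
 * current RIF record is queried, only the MTU, MAC and op fields are
 * updated, and the record is written back, leaving the rest of the
 * interface configuration untouched.
 */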
6604
6605int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6606{
6607 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006608 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006609 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006610 int err;
6611
6612 mlxsw_sp = mlxsw_sp_lower_get(dev);
6613 if (!mlxsw_sp)
6614 return 0;
6615
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006616 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6617 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006618 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006619 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006620
Ido Schimmela1107482017-05-26 08:37:39 +02006621 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006622 if (err)
6623 return err;
6624
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006625 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6626 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006627 if (err)
6628 goto err_rif_edit;
6629
Ido Schimmela1107482017-05-26 08:37:39 +02006630 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006631 if (err)
6632 goto err_rif_fdb_op;
6633
Yotam Gigifd890fe2017-09-27 08:23:21 +02006634 if (rif->mtu != dev->mtu) {
6635 struct mlxsw_sp_vr *vr;
Yuval Mintz9742f862018-03-26 15:01:40 +03006636 int i;
Yotam Gigifd890fe2017-09-27 08:23:21 +02006637
6638		/* The RIF update is relevant only to its own mr_table instance:
6639		 * unlike in unicast routing, a multicast RIF cannot be shared
6640		 * between several multicast routing tables.
6641 */
6642 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Yuval Mintz9742f862018-03-26 15:01:40 +03006643 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6644 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
6645 rif, dev->mtu);
Yotam Gigifd890fe2017-09-27 08:23:21 +02006646 }
6647
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006648 ether_addr_copy(rif->addr, dev->dev_addr);
6649 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006650
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006651 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006652
6653 return 0;
6654
6655err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006656 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006657err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006658 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006659 return err;
6660}
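/* Note the unwind order above: if re-programming the RIF or inserting the
 * new FDB entry fails, the old RITR configuration and the old FDB entry
 * are restored, so the hardware keeps matching rif->addr / rif->mtu, which
 * are only updated once every step has succeeded.
 */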
6661
Ido Schimmelb1e45522017-04-30 19:47:14 +03006662static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006663 struct net_device *l3_dev,
6664 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006665{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006666 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006667
Ido Schimmelb1e45522017-04-30 19:47:14 +03006668 /* If netdev is already associated with a RIF, then we need to
6669 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006670 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006671 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6672 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006673 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006674
David Ahernf8fa9b42017-10-18 09:56:56 -07006675 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006676}
6677
Ido Schimmelb1e45522017-04-30 19:47:14 +03006678static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6679 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006680{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006681 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006682
Ido Schimmelb1e45522017-04-30 19:47:14 +03006683 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6684 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006685 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006686 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006687}
6688
Ido Schimmelb1e45522017-04-30 19:47:14 +03006689int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6690 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006691{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006692 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6693 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006694
Ido Schimmelb1e45522017-04-30 19:47:14 +03006695 if (!mlxsw_sp)
6696 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006697
Ido Schimmelb1e45522017-04-30 19:47:14 +03006698 switch (event) {
6699 case NETDEV_PRECHANGEUPPER:
6700 return 0;
6701 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006702 if (info->linking) {
6703 struct netlink_ext_ack *extack;
6704
6705 extack = netdev_notifier_info_to_extack(&info->info);
6706 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6707 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006708 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006709 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006710 break;
6711 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006712
Ido Schimmelb1e45522017-04-30 19:47:14 +03006713 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006714}
6715
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006716static struct mlxsw_sp_rif_subport *
6717mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006718{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006719 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006720}
6721
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006722static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6723 const struct mlxsw_sp_rif_params *params)
6724{
6725 struct mlxsw_sp_rif_subport *rif_subport;
6726
6727 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6728 rif_subport->vid = params->vid;
6729 rif_subport->lag = params->lag;
6730 if (params->lag)
6731 rif_subport->lag_id = params->lag_id;
6732 else
6733 rif_subport->system_port = params->system_port;
6734}
6735
6736static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6737{
6738 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6739 struct mlxsw_sp_rif_subport *rif_subport;
6740 char ritr_pl[MLXSW_REG_RITR_LEN];
6741
6742 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6743 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006744 rif->rif_index, rif->vr_id, rif->dev->mtu);
6745 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006746 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6747 rif_subport->lag ? rif_subport->lag_id :
6748 rif_subport->system_port,
6749 rif_subport->vid);
6750
6751 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6752}
6753
6754static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6755{
Petr Machata010cadf2017-09-02 23:49:18 +02006756 int err;
6757
6758 err = mlxsw_sp_rif_subport_op(rif, true);
6759 if (err)
6760 return err;
6761
6762 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6763 mlxsw_sp_fid_index(rif->fid), true);
6764 if (err)
6765 goto err_rif_fdb_op;
6766
6767 mlxsw_sp_fid_rif_set(rif->fid, rif);
6768 return 0;
6769
6770err_rif_fdb_op:
6771 mlxsw_sp_rif_subport_op(rif, false);
6772 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006773}
6774
6775static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6776{
Petr Machata010cadf2017-09-02 23:49:18 +02006777 struct mlxsw_sp_fid *fid = rif->fid;
6778
6779 mlxsw_sp_fid_rif_set(fid, NULL);
6780 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6781 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006782 mlxsw_sp_rif_subport_op(rif, false);
6783}
6784
6785static struct mlxsw_sp_fid *
Petr Machata5f15e252018-06-25 10:48:13 +03006786mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
6787 struct netlink_ext_ack *extack)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006788{
6789 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6790}
6791
6792static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6793 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6794 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6795 .setup = mlxsw_sp_rif_subport_setup,
6796 .configure = mlxsw_sp_rif_subport_configure,
6797 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6798 .fid_get = mlxsw_sp_rif_subport_fid_get,
6799};
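/* configure() and deconfigure() above are deliberately symmetric: the FDB
 * entry for the RIF MAC and the FID-to-RIF binding set up on configure are
 * torn down in reverse order on deconfigure, mirroring the error path of
 * mlxsw_sp_rif_subport_configure().
 */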
6800
6801static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6802 enum mlxsw_reg_ritr_if_type type,
6803 u16 vid_fid, bool enable)
6804{
6805 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6806 char ritr_pl[MLXSW_REG_RITR_LEN];
6807
6808 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006809 rif->dev->mtu);
6810 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006811 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6812
6813 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6814}
6815
Yotam Gigib35750f2017-10-09 11:15:33 +02006816u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006817{
6818 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6819}
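/* The "router port" is not a front-panel port: it is a virtual local port
 * numbered one past the last possible port. It is added to the FID flood
 * tables below so that multicast and broadcast traffic flooded within the
 * FID is also delivered to the router block.
 */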
6820
6821static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6822{
6823 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6824 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6825 int err;
6826
6827 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6828 if (err)
6829 return err;
6830
Ido Schimmel0d284812017-07-18 10:10:12 +02006831 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6832 mlxsw_sp_router_port(mlxsw_sp), true);
6833 if (err)
6834 goto err_fid_mc_flood_set;
6835
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006836 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6837 mlxsw_sp_router_port(mlxsw_sp), true);
6838 if (err)
6839 goto err_fid_bc_flood_set;
6840
Petr Machata010cadf2017-09-02 23:49:18 +02006841 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6842 mlxsw_sp_fid_index(rif->fid), true);
6843 if (err)
6844 goto err_rif_fdb_op;
6845
6846 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006847 return 0;
6848
Petr Machata010cadf2017-09-02 23:49:18 +02006849err_rif_fdb_op:
6850 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6851 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006852err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006853 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6854 mlxsw_sp_router_port(mlxsw_sp), false);
6855err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006856 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6857 return err;
6858}
6859
6860static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6861{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006862 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006863 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6864 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006865
Petr Machata010cadf2017-09-02 23:49:18 +02006866 mlxsw_sp_fid_rif_set(fid, NULL);
6867 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6868 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006869 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6870 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006871 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6872 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006873 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6874}
6875
6876static struct mlxsw_sp_fid *
Petr Machata5f15e252018-06-25 10:48:13 +03006877mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
6878 struct netlink_ext_ack *extack)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006879{
Petr Machatae6f19602018-06-25 10:48:14 +03006880 u16 vid;
6881 int err;
6882
6883 if (is_vlan_dev(rif->dev)) {
6884 vid = vlan_dev_vlan_id(rif->dev);
6885 } else {
6886 err = br_vlan_get_pvid(rif->dev, &vid);
Arnd Bergmannbe9c64b2018-07-06 14:44:45 +02006887 if (err < 0 || !vid) {
Petr Machatae6f19602018-06-25 10:48:14 +03006888 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
Arnd Bergmannbe9c64b2018-07-06 14:44:45 +02006889 return ERR_PTR(-EINVAL);
Petr Machatae6f19602018-06-25 10:48:14 +03006890 }
6891 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006892
6893 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6894}
6895
6896static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6897 .type = MLXSW_SP_RIF_TYPE_VLAN,
6898 .rif_size = sizeof(struct mlxsw_sp_rif),
6899 .configure = mlxsw_sp_rif_vlan_configure,
6900 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6901 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6902};
6903
6904static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6905{
6906 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6907 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6908 int err;
6909
6910 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6911 true);
6912 if (err)
6913 return err;
6914
Ido Schimmel0d284812017-07-18 10:10:12 +02006915 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6916 mlxsw_sp_router_port(mlxsw_sp), true);
6917 if (err)
6918 goto err_fid_mc_flood_set;
6919
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006920 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6921 mlxsw_sp_router_port(mlxsw_sp), true);
6922 if (err)
6923 goto err_fid_bc_flood_set;
6924
Petr Machata010cadf2017-09-02 23:49:18 +02006925 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6926 mlxsw_sp_fid_index(rif->fid), true);
6927 if (err)
6928 goto err_rif_fdb_op;
6929
6930 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006931 return 0;
6932
Petr Machata010cadf2017-09-02 23:49:18 +02006933err_rif_fdb_op:
6934 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6935 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006936err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006937 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6938 mlxsw_sp_router_port(mlxsw_sp), false);
6939err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006940 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6941 return err;
6942}
6943
6944static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6945{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006946 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006947 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6948 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006949
Petr Machata010cadf2017-09-02 23:49:18 +02006950 mlxsw_sp_fid_rif_set(fid, NULL);
6951 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6952 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006953 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6954 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006955 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6956 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006957 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6958}
6959
6960static struct mlxsw_sp_fid *
Petr Machata5f15e252018-06-25 10:48:13 +03006961mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
6962 struct netlink_ext_ack *extack)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006963{
6964 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6965}
6966
6967static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6968 .type = MLXSW_SP_RIF_TYPE_FID,
6969 .rif_size = sizeof(struct mlxsw_sp_rif),
6970 .configure = mlxsw_sp_rif_fid_configure,
6971 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6972 .fid_get = mlxsw_sp_rif_fid_fid_get,
6973};
6974
Petr Machata6ddb7422017-09-02 23:49:19 +02006975static struct mlxsw_sp_rif_ipip_lb *
6976mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6977{
6978 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6979}
6980
6981static void
6982mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6983 const struct mlxsw_sp_rif_params *params)
6984{
6985 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6986 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6987
6988 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6989 common);
6990 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6991 rif_lb->lb_config = params_lb->lb_config;
6992}
6993
6994static int
Petr Machata6ddb7422017-09-02 23:49:19 +02006995mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6996{
6997 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6998 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6999 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7000 struct mlxsw_sp_vr *ul_vr;
7001 int err;
7002
David Ahernf8fa9b42017-10-18 09:56:56 -07007003 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02007004 if (IS_ERR(ul_vr))
7005 return PTR_ERR(ul_vr);
7006
7007 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
7008 if (err)
7009 goto err_loopback_op;
7010
7011 lb_rif->ul_vr_id = ul_vr->id;
7012 ++ul_vr->rif_count;
7013 return 0;
7014
7015err_loopback_op:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01007016 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
Petr Machata6ddb7422017-09-02 23:49:19 +02007017 return err;
7018}
7019
7020static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7021{
7022 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7023 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7024 struct mlxsw_sp_vr *ul_vr;
7025
7026 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7027 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
7028
7029 --ul_vr->rif_count;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01007030 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
Petr Machata6ddb7422017-09-02 23:49:19 +02007031}
7032
7033static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
7034 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
7035 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
7036 .setup = mlxsw_sp_rif_ipip_lb_setup,
7037 .configure = mlxsw_sp_rif_ipip_lb_configure,
7038 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
7039};
7040
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02007041static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
7042 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
7043 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
7044 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02007045 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02007046};
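/* A rough sketch of how this ops table is meant to be consumed; the actual
 * RIF creation path lives earlier in this file, and the allocation helper
 * below is only illustrative:
 *
 *	const struct mlxsw_sp_rif_ops *ops =
 *		mlxsw_sp->router->rif_ops_arr[type];
 *
 *	rif = rif_alloc(ops->rif_size, ...);	// hypothetical helper
 *	if (ops->setup)
 *		ops->setup(rif, params);
 *	rif->fid = ops->fid_get(rif, extack);	// where the type has a FID
 *	err = ops->configure(rif);
 *	...
 *	ops->deconfigure(rif);			// on removal
 */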
7047
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007048static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7049{
7050 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7051
7052 mlxsw_sp->router->rifs = kcalloc(max_rifs,
7053 sizeof(struct mlxsw_sp_rif *),
7054 GFP_KERNEL);
7055 if (!mlxsw_sp->router->rifs)
7056 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02007057
7058 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
7059
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007060 return 0;
7061}
7062
7063static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7064{
7065 int i;
7066
7067 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7068 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7069
7070 kfree(mlxsw_sp->router->rifs);
7071}
7072
Petr Machatadcbda282017-10-20 09:16:16 +02007073static int
7074mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
7075{
7076 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
7077
7078 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
7079 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
7080}
7081
Petr Machata38ebc0f2017-09-02 23:49:17 +02007082static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
7083{
7084 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02007085 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02007086 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02007087}
7088
7089static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
7090{
Petr Machata1012b9a2017-09-02 23:49:23 +02007091 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02007092}
7093
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007094static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
7095{
Ido Schimmel7e39d112017-05-16 19:38:28 +02007096 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007097
7098 /* Flush pending FIB notifications and then flush the device's
7099 * table before requesting another dump. The FIB notification
7100 * block is unregistered, so no need to take RTNL.
7101 */
7102 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02007103 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7104 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007105}
7106
Ido Schimmelaf658b62017-11-02 17:14:09 +01007107#ifdef CONFIG_IP_ROUTE_MULTIPATH
7108static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
7109{
7110 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
7111}
7112
7113static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
7114{
7115 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
7116}
7117
7118static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
7119{
7120 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
7121
7122 mlxsw_sp_mp_hash_header_set(recr2_pl,
7123 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
7124 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
7125 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
7126 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
7127 if (only_l3)
7128 return;
7129 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
7130 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
7131 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
7132 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
7133}
7134
7135static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
7136{
Petr Machata918ee502018-03-11 09:45:47 +02007137 bool only_l3 = !ip6_multipath_hash_policy(&init_net);
David Ahern5e18b9c552018-03-02 08:32:19 -08007138
Ido Schimmelaf658b62017-11-02 17:14:09 +01007139 mlxsw_sp_mp_hash_header_set(recr2_pl,
7140 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
7141 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
7142 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
7143 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
Ido Schimmelaf658b62017-11-02 17:14:09 +01007144 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
David Ahern5e18b9c552018-03-02 08:32:19 -08007145 if (only_l3) {
7146 mlxsw_sp_mp_hash_field_set(recr2_pl,
7147 MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
7148 } else {
7149 mlxsw_sp_mp_hash_header_set(recr2_pl,
7150 MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
7151 mlxsw_sp_mp_hash_field_set(recr2_pl,
7152 MLXSW_REG_RECR2_TCP_UDP_SPORT);
7153 mlxsw_sp_mp_hash_field_set(recr2_pl,
7154 MLXSW_REG_RECR2_TCP_UDP_DPORT);
7155 }
Ido Schimmelaf658b62017-11-02 17:14:09 +01007156}
7157
7158static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7159{
7160 char recr2_pl[MLXSW_REG_RECR2_LEN];
7161 u32 seed;
7162
7163 get_random_bytes(&seed, sizeof(seed));
7164 mlxsw_reg_recr2_pack(recr2_pl, seed);
7165 mlxsw_sp_mp4_hash_init(recr2_pl);
7166 mlxsw_sp_mp6_hash_init(recr2_pl);
7167
7168 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
7169}
7170#else
7171static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7172{
7173 return 0;
7174}
7175#endif
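/* The ECMP hash configuration above follows the kernel's own policy: when
 * net.ipv4.fib_multipath_hash_policy / net.ipv6.fib_multipath_hash_policy
 * (as read from init_net) are 0 the hardware hashes on L3 fields only, and
 * when they are 1 the TCP/UDP ports are mixed in as well. The RECR2 seed
 * is randomized so the resulting ECMP distribution is not identical across
 * devices.
 */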
7176
Yuval Mintz48276a22018-01-14 12:33:14 +01007177static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
7178{
7179 char rdpm_pl[MLXSW_REG_RDPM_LEN];
7180 unsigned int i;
7181
7182 MLXSW_REG_ZERO(rdpm, rdpm_pl);
7183
7184	/* HW derives the switch priority from the DSCP bits, whereas the
7185	 * kernel still derives it from the full ToS byte. To bridge this
7186	 * mismatch, program each DSCP entry with the priority the kernel would
7187	 * assign to the corresponding ToS, skipping the 2 least-significant ECN bits.
7188 */
7189 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
7190 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
7191
7192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
7193}
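/* Worked example for the shift above: DSCP occupies the upper six bits of
 * the ToS byte, so entry i = 46 (EF) is programmed with
 * rt_tos2priority(46 << 2) == rt_tos2priority(0xb8), i.e. the priority the
 * kernel would compute for a packet carrying that DSCP in its ToS field.
 */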
7194
Ido Schimmel4724ba562017-03-10 08:53:39 +01007195static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7196{
7197 char rgcr_pl[MLXSW_REG_RGCR_LEN];
7198 u64 max_rifs;
7199 int err;
7200
7201 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
7202 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007203 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007204
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007205 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007206 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
Yuval Mintz48276a22018-01-14 12:33:14 +01007207 mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007208 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
7209 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007210 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007211 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007212}
7213
7214static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7215{
7216 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01007217
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007218 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007219 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007220}
7221
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007222int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7223{
Ido Schimmel9011b672017-05-16 19:38:25 +02007224 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007225 int err;
7226
Ido Schimmel9011b672017-05-16 19:38:25 +02007227 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
7228 if (!router)
7229 return -ENOMEM;
7230 mlxsw_sp->router = router;
7231 router->mlxsw_sp = mlxsw_sp;
7232
7233 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007234 err = __mlxsw_sp_router_init(mlxsw_sp);
7235 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02007236 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007237
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007238 err = mlxsw_sp_rifs_init(mlxsw_sp);
7239 if (err)
7240 goto err_rifs_init;
7241
Petr Machata38ebc0f2017-09-02 23:49:17 +02007242 err = mlxsw_sp_ipips_init(mlxsw_sp);
7243 if (err)
7244 goto err_ipips_init;
7245
Ido Schimmel9011b672017-05-16 19:38:25 +02007246 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007247 &mlxsw_sp_nexthop_ht_params);
7248 if (err)
7249 goto err_nexthop_ht_init;
7250
Ido Schimmel9011b672017-05-16 19:38:25 +02007251 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007252 &mlxsw_sp_nexthop_group_ht_params);
7253 if (err)
7254 goto err_nexthop_group_ht_init;
7255
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02007256 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007257 err = mlxsw_sp_lpm_init(mlxsw_sp);
7258 if (err)
7259 goto err_lpm_init;
7260
Yotam Gigid42b0962017-09-27 08:23:20 +02007261 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
7262 if (err)
7263 goto err_mr_init;
7264
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007265 err = mlxsw_sp_vrs_init(mlxsw_sp);
7266 if (err)
7267 goto err_vrs_init;
7268
Ido Schimmel8c9583a2016-10-27 15:12:57 +02007269 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007270 if (err)
7271 goto err_neigh_init;
7272
Ido Schimmel48fac882017-11-02 17:14:06 +01007273 mlxsw_sp->router->netevent_nb.notifier_call =
7274 mlxsw_sp_router_netevent_event;
7275 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7276 if (err)
7277 goto err_register_netevent_notifier;
7278
Ido Schimmelaf658b62017-11-02 17:14:09 +01007279 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
7280 if (err)
7281 goto err_mp_hash_init;
7282
Yuval Mintz48276a22018-01-14 12:33:14 +01007283 err = mlxsw_sp_dscp_init(mlxsw_sp);
7284 if (err)
7285 goto err_dscp_init;
7286
Ido Schimmel7e39d112017-05-16 19:38:28 +02007287 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
7288 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007289 mlxsw_sp_router_fib_dump_flush);
7290 if (err)
7291 goto err_register_fib_notifier;
7292
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007293 return 0;
7294
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007295err_register_fib_notifier:
Yuval Mintz48276a22018-01-14 12:33:14 +01007296err_dscp_init:
Ido Schimmelaf658b62017-11-02 17:14:09 +01007297err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01007298 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7299err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007300 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007301err_neigh_init:
7302 mlxsw_sp_vrs_fini(mlxsw_sp);
7303err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02007304 mlxsw_sp_mr_fini(mlxsw_sp);
7305err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01007306 mlxsw_sp_lpm_fini(mlxsw_sp);
7307err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007308 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007309err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007310 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007311err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02007312 mlxsw_sp_ipips_fini(mlxsw_sp);
7313err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007314 mlxsw_sp_rifs_fini(mlxsw_sp);
7315err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007316 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007317err_router_init:
7318 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007319 return err;
7320}
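/* The error labels above unwind in reverse order of initialization, so a
 * failure at any step leaves no partially-registered notifiers or
 * half-initialized tables behind; mlxsw_sp_router_fini() below follows the
 * same reverse order for the regular teardown.
 */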
7321
7322void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7323{
Ido Schimmel7e39d112017-05-16 19:38:28 +02007324 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01007325 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007326 mlxsw_sp_neigh_fini(mlxsw_sp);
7327 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02007328 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007329 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007330 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
7331 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02007332 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007333 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007334 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007335 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007336}