/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
};

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

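/* Bind or unbind a counter on a RIF. The RITR register is queried first so
 * the existing router interface configuration is preserved, and is then
 * written back with the updated counter binding.
 */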
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

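/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the given direction. On failure the counter is returned to the
 * pool.
 */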
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

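/* An egress counter is only set up for a new RIF when counters are enabled
 * for the erif dpipe table; the devlink instance is consulted to find out.
 */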
static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

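/* Program the tree structure via the RALST register: the longest used prefix
 * length becomes the root bin, and each used prefix length (except zero) gets
 * the next shorter one as its left child, forming a chain.
 */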
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

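/* Initialize the LPM tree pool: query how many trees the device supports
 * (tree 0 is reserved) and create the default, empty tree for each protocol.
 */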
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

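/* Rebind every virtual router that currently uses the protocol's old default
 * tree to the new tree, and make the new tree the protocol default. Routers
 * already converted are rolled back to the old tree if a later bind fails.
 */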
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

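/* Resolve the underlay device of an IP-in-IP overlay device by following the
 * tunnel's bound device (tun->parms.link), if any.
 */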
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

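/* Link a FIB entry with the IPIP entry whose decap route it is, and allocate
 * a tunnel index for it from the KVD linear area.
 */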
Petr Machata4607f6d2017-09-02 23:49:25 +02001105static int
1106mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1107 struct mlxsw_sp_fib_entry *fib_entry,
1108 struct mlxsw_sp_ipip_entry *ipip_entry)
1109{
1110 u32 tunnel_index;
1111 int err;
1112
1113 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
1114 if (err)
1115 return err;
1116
1117 ipip_entry->decap_fib_entry = fib_entry;
1118 fib_entry->decap.ipip_entry = ipip_entry;
1119 fib_entry->decap.tunnel_index = tunnel_index;
1120 return 0;
1121}
1122
1123static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1124 struct mlxsw_sp_fib_entry *fib_entry)
1125{
1126 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1127 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1128 fib_entry->decap.ipip_entry = NULL;
1129 mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
1130}
1131
Petr Machata1cc38fb2017-09-02 23:49:26 +02001132static struct mlxsw_sp_fib_node *
1133mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1134 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001135static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1136 struct mlxsw_sp_fib_entry *fib_entry);
1137
1138static void
1139mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1140 struct mlxsw_sp_ipip_entry *ipip_entry)
1141{
1142 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1143
1144 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1145 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1146
1147 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1148}
1149
Petr Machata1cc38fb2017-09-02 23:49:26 +02001150static void
1151mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1152 struct mlxsw_sp_ipip_entry *ipip_entry,
1153 struct mlxsw_sp_fib_entry *decap_fib_entry)
1154{
1155 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1156 ipip_entry))
1157 return;
1158 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1159
1160 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1161 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1162}
1163
1164/* Given an IPIP entry, find the corresponding decap route. */
1165static struct mlxsw_sp_fib_entry *
1166mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1167 struct mlxsw_sp_ipip_entry *ipip_entry)
1168{
1169 static struct mlxsw_sp_fib_node *fib_node;
1170 const struct mlxsw_sp_ipip_ops *ipip_ops;
1171 struct mlxsw_sp_fib_entry *fib_entry;
1172 unsigned char saddr_prefix_len;
1173 union mlxsw_sp_l3addr saddr;
1174 struct mlxsw_sp_fib *ul_fib;
1175 struct mlxsw_sp_vr *ul_vr;
1176 const void *saddrp;
1177 size_t saddr_len;
1178 u32 ul_tb_id;
1179 u32 saddr4;
1180
1181 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1182
1183 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1184 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1185 if (!ul_vr)
1186 return NULL;
1187
1188 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1189 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1190 ipip_entry->ol_dev);
1191
1192 switch (ipip_ops->ul_proto) {
1193 case MLXSW_SP_L3_PROTO_IPV4:
1194 saddr4 = be32_to_cpu(saddr.addr4);
1195 saddrp = &saddr4;
1196 saddr_len = 4;
1197 saddr_prefix_len = 32;
1198 break;
1199 case MLXSW_SP_L3_PROTO_IPV6:
1200 WARN_ON(1);
1201 return NULL;
1202 }
1203
1204 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1205 saddr_prefix_len);
1206 if (!fib_node || list_empty(&fib_node->entry_list))
1207 return NULL;
1208
1209 fib_entry = list_first_entry(&fib_node->entry_list,
1210 struct mlxsw_sp_fib_entry, list);
1211 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1212 return NULL;
1213
1214 return fib_entry;
1215}
1216
Petr Machata1012b9a2017-09-02 23:49:23 +02001217static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001218mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1219 enum mlxsw_sp_ipip_type ipipt,
1220 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001221{
Petr Machata1012b9a2017-09-02 23:49:23 +02001222 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02001223
1224 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1225 if (IS_ERR(ipip_entry))
1226 return ipip_entry;
1227
1228 list_add_tail(&ipip_entry->ipip_list_node,
1229 &mlxsw_sp->router->ipip_list);
1230
Petr Machata1012b9a2017-09-02 23:49:23 +02001231 return ipip_entry;
1232}
1233
1234static void
Petr Machata4cccb732017-10-16 16:26:39 +02001235mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1236 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001237{
Petr Machata4cccb732017-10-16 16:26:39 +02001238 list_del(&ipip_entry->ipip_list_node);
1239 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
Petr Machata1012b9a2017-09-02 23:49:23 +02001240}
1241
Petr Machata4607f6d2017-09-02 23:49:25 +02001242static bool
1243mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1244 const struct net_device *ul_dev,
1245 enum mlxsw_sp_l3proto ul_proto,
1246 union mlxsw_sp_l3addr ul_dip,
1247 struct mlxsw_sp_ipip_entry *ipip_entry)
1248{
1249 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1250 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1251 struct net_device *ipip_ul_dev;
1252
1253 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1254 return false;
1255
1256 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1257 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1258 ul_tb_id, ipip_entry) &&
1259 (!ipip_ul_dev || ipip_ul_dev == ul_dev);
1260}
1261
1262/* Given decap parameters, find the corresponding IPIP entry. */
1263static struct mlxsw_sp_ipip_entry *
1264mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1265 const struct net_device *ul_dev,
1266 enum mlxsw_sp_l3proto ul_proto,
1267 union mlxsw_sp_l3addr ul_dip)
1268{
1269 struct mlxsw_sp_ipip_entry *ipip_entry;
1270
1271 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1272 ipip_list_node)
1273 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1274 ul_proto, ul_dip,
1275 ipip_entry))
1276 return ipip_entry;
1277
1278 return NULL;
1279}
1280
Petr Machata6698c162017-10-16 16:26:36 +02001281static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1282 const struct net_device *dev,
1283 enum mlxsw_sp_ipip_type *p_type)
1284{
1285 struct mlxsw_sp_router *router = mlxsw_sp->router;
1286 const struct mlxsw_sp_ipip_ops *ipip_ops;
1287 enum mlxsw_sp_ipip_type ipipt;
1288
1289 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1290 ipip_ops = router->ipip_ops_arr[ipipt];
1291 if (dev->type == ipip_ops->dev_type) {
1292 if (p_type)
1293 *p_type = ipipt;
1294 return true;
1295 }
1296 }
1297 return false;
1298}
1299
Petr Machata796ec772017-11-03 10:03:29 +01001300bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1301 const struct net_device *dev)
Petr Machata00635872017-10-16 16:26:37 +02001302{
1303 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1304}
1305
1306static struct mlxsw_sp_ipip_entry *
1307mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1308 const struct net_device *ol_dev)
1309{
1310 struct mlxsw_sp_ipip_entry *ipip_entry;
1311
1312 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1313 ipip_list_node)
1314 if (ipip_entry->ol_dev == ol_dev)
1315 return ipip_entry;
1316
1317 return NULL;
1318}
1319
Petr Machata61481f22017-11-03 10:03:41 +01001320static struct mlxsw_sp_ipip_entry *
1321mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1322 const struct net_device *ul_dev,
1323 struct mlxsw_sp_ipip_entry *start)
1324{
1325 struct mlxsw_sp_ipip_entry *ipip_entry;
1326
1327 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1328 ipip_list_node);
1329 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1330 ipip_list_node) {
1331 struct net_device *ipip_ul_dev =
1332 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1333
1334 if (ipip_ul_dev == ul_dev)
1335 return ipip_entry;
1336 }
1337
1338 return NULL;
1339}
1340
1341bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1342 const struct net_device *dev)
1343{
1344 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1345}
1346
Petr Machatacafdb2a2017-11-03 10:03:30 +01001347static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1348 const struct net_device *ol_dev,
1349 enum mlxsw_sp_ipip_type ipipt)
1350{
1351 const struct mlxsw_sp_ipip_ops *ops
1352 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1353
1354 /* For deciding whether decap should be offloaded, we don't care about
1355 * overlay protocol, so ask whether either one is supported.
1356 */
1357 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1358 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1359}
1360
Petr Machata796ec772017-11-03 10:03:29 +01001361static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1362 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001363{
Petr Machata00635872017-10-16 16:26:37 +02001364 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001365 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001366 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001367 union mlxsw_sp_l3addr saddr;
1368 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001369
1370 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001371 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001372 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1373 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1374 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1375 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1376 saddr, ul_tb_id,
1377 NULL)) {
1378 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1379 ol_dev);
1380 if (IS_ERR(ipip_entry))
1381 return PTR_ERR(ipip_entry);
1382 }
Petr Machata00635872017-10-16 16:26:37 +02001383 }
1384
1385 return 0;
1386}
1387
Petr Machata796ec772017-11-03 10:03:29 +01001388static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1389 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001390{
1391 struct mlxsw_sp_ipip_entry *ipip_entry;
1392
1393 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1394 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001395 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001396}
1397
Petr Machata47518ca2017-11-03 10:03:35 +01001398static void
1399mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1400 struct mlxsw_sp_ipip_entry *ipip_entry)
1401{
1402 struct mlxsw_sp_fib_entry *decap_fib_entry;
1403
1404 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1405 if (decap_fib_entry)
1406 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1407 decap_fib_entry);
1408}
1409
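/* Program (or disable) the loopback RIF backing the tunnel through the RITR
 * register. Only an IPv4 underlay is currently supported; an IPv6 underlay
 * is rejected with -EAFNOSUPPORT.
 */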
Petr Machata22b990582018-03-22 19:53:34 +02001410static int
1411mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
1412 struct mlxsw_sp_vr *ul_vr, bool enable)
1413{
1414 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1415 struct mlxsw_sp_rif *rif = &lb_rif->common;
1416 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1417 char ritr_pl[MLXSW_REG_RITR_LEN];
1418 u32 saddr4;
1419
1420 switch (lb_cf.ul_protocol) {
1421 case MLXSW_SP_L3_PROTO_IPV4:
1422 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1423 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1424 rif->rif_index, rif->vr_id, rif->dev->mtu);
1425 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1426 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1427 ul_vr->id, saddr4, lb_cf.okey);
1428 break;
1429
1430 case MLXSW_SP_L3_PROTO_IPV6:
1431 return -EAFNOSUPPORT;
1432 }
1433
1434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1435}
1436
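/* On an MTU change of the overlay device, re-program the backing loopback
 * RIF so that the new MTU is reflected in the hardware, and record it on the
 * RIF.
 */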
Petr Machata68c3cd92018-03-22 19:53:35 +02001437static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1438 struct net_device *ol_dev)
1439{
1440 struct mlxsw_sp_ipip_entry *ipip_entry;
1441 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1442 struct mlxsw_sp_vr *ul_vr;
1443 int err = 0;
1444
1445 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1446 if (ipip_entry) {
1447 lb_rif = ipip_entry->ol_lb;
1448 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1449 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1450 if (err)
1451 goto out;
1452 lb_rif->common.mtu = ol_dev->mtu;
1453 }
1454
1455out:
1456 return err;
1457}
1458
Petr Machata6d4de442017-11-03 10:03:34 +01001459static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1460 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001461{
Petr Machata00635872017-10-16 16:26:37 +02001462 struct mlxsw_sp_ipip_entry *ipip_entry;
1463
1464 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001465 if (ipip_entry)
1466 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001467}
1468
Petr Machataa3fe1982017-11-03 10:03:33 +01001469static void
1470mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1471 struct mlxsw_sp_ipip_entry *ipip_entry)
1472{
1473 if (ipip_entry->decap_fib_entry)
1474 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1475}
1476
Petr Machata796ec772017-11-03 10:03:29 +01001477static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1478 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001479{
1480 struct mlxsw_sp_ipip_entry *ipip_entry;
1481
1482 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001483 if (ipip_entry)
1484 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001485}
1486
Petr Machata09dbf622017-11-28 13:17:14 +01001487static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1488 struct mlxsw_sp_rif *old_rif,
1489 struct mlxsw_sp_rif *new_rif);
Petr Machata65a61212017-11-03 10:03:37 +01001490static int
1491mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1492 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001493 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001494 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001495{
Petr Machata65a61212017-11-03 10:03:37 +01001496 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1497 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001498
Petr Machata65a61212017-11-03 10:03:37 +01001499 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1500 ipip_entry->ipipt,
1501 ipip_entry->ol_dev,
1502 extack);
1503 if (IS_ERR(new_lb_rif))
1504 return PTR_ERR(new_lb_rif);
1505 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001506
Petr Machata09dbf622017-11-28 13:17:14 +01001507 if (keep_encap)
1508 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1509 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001510
Petr Machata65a61212017-11-03 10:03:37 +01001511 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001512
Petr Machata65a61212017-11-03 10:03:37 +01001513 return 0;
1514}
1515
Petr Machata09dbf622017-11-28 13:17:14 +01001516static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1517 struct mlxsw_sp_rif *rif);
1518
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001519/**
1520 * __mlxsw_sp_ipip_entry_update_tunnel - update the offload related to an
1521 * IPIP entry. This always updates decap, and in addition to that it also:
1522 * @recreate_loopback: recreates the associated loopback RIF
1523 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1524 * relevant when recreate_loopback is true.
1525 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1526 * is only relevant when recreate_loopback is false.
1527 */
Petr Machata65a61212017-11-03 10:03:37 +01001528int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1529 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001530 bool recreate_loopback,
1531 bool keep_encap,
1532 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001533 struct netlink_ext_ack *extack)
1534{
1535 int err;
1536
1537 /* RIFs can't be edited, so to update loopback, we need to destroy and
1538 * recreate it. That creates a window of opportunity where RALUE and
1539 * RATR registers end up referencing a RIF that's already gone. RATRs
1540 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001541 * of RALUE, demote the decap route back.
1542 */
1543 if (ipip_entry->decap_fib_entry)
1544 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1545
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001546 if (recreate_loopback) {
1547 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1548 keep_encap, extack);
1549 if (err)
1550 return err;
1551 } else if (update_nexthops) {
1552 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1553 &ipip_entry->ol_lb->common);
1554 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001555
Petr Machata65a61212017-11-03 10:03:37 +01001556 if (ipip_entry->ol_dev->flags & IFF_UP)
1557 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001558
1559 return 0;
1560}
1561
Petr Machata65a61212017-11-03 10:03:37 +01001562static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1563 struct net_device *ol_dev,
1564 struct netlink_ext_ack *extack)
1565{
1566 struct mlxsw_sp_ipip_entry *ipip_entry =
1567 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001568 enum mlxsw_sp_l3proto ul_proto;
1569 union mlxsw_sp_l3addr saddr;
1570 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001571
1572 if (!ipip_entry)
1573 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001574
1575	/* For flat configuration cases, moving the overlay to a different VRF
1576	 * might cause a local address conflict, and the conflicting tunnels need
1577	 * to be demoted.
1578 */
1579 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1580 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1581 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1582 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1583 saddr, ul_tb_id,
1584 ipip_entry)) {
1585 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1586 return 0;
1587 }
1588
Petr Machata65a61212017-11-03 10:03:37 +01001589 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001590 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001591}
1592
Petr Machata61481f22017-11-03 10:03:41 +01001593static int
1594mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1595 struct mlxsw_sp_ipip_entry *ipip_entry,
1596 struct net_device *ul_dev,
1597 struct netlink_ext_ack *extack)
1598{
1599 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1600 true, true, false, extack);
1601}
1602
Petr Machata4cf04f32017-11-03 10:03:42 +01001603static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001604mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1605 struct mlxsw_sp_ipip_entry *ipip_entry,
1606 struct net_device *ul_dev)
1607{
1608 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1609 false, false, true, NULL);
1610}
1611
1612static int
1613mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1614 struct mlxsw_sp_ipip_entry *ipip_entry,
1615 struct net_device *ul_dev)
1616{
1617 /* A down underlay device causes encapsulated packets to not be
1618 * forwarded, but decap still works. So refresh next hops without
1619 * touching anything else.
1620 */
1621 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1622 false, false, true, NULL);
1623}
1624
1625static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001626mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1627 struct net_device *ol_dev,
1628 struct netlink_ext_ack *extack)
1629{
1630 const struct mlxsw_sp_ipip_ops *ipip_ops;
1631 struct mlxsw_sp_ipip_entry *ipip_entry;
1632 int err;
1633
1634 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1635 if (!ipip_entry)
1636 /* A change might make a tunnel eligible for offloading, but
1637		 * that is currently not implemented. What falls to the slow path
1638 * stays there.
1639 */
1640 return 0;
1641
1642 /* A change might make a tunnel not eligible for offloading. */
1643 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1644 ipip_entry->ipipt)) {
1645 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1646 return 0;
1647 }
1648
1649 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1650 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1651 return err;
1652}
1653
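/* Demoting a tunnel takes it out of the offloaded data path: decap offload
 * is stopped if the overlay device is up and the IPIP entry is destroyed, so
 * that the tunnel's traffic is handled in the slow path.
 */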
Petr Machataaf641712017-11-03 10:03:40 +01001654void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1655 struct mlxsw_sp_ipip_entry *ipip_entry)
1656{
1657 struct net_device *ol_dev = ipip_entry->ol_dev;
1658
1659 if (ol_dev->flags & IFF_UP)
1660 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1661 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1662}
1663
1664/* The configuration where several tunnels have the same local address in the
1665 * same underlay table needs special treatment in the HW. That is currently not
1666 * implemented in the driver. This function finds and demotes the first tunnel
1667 * with a given source address, except the one passed in via the argument
1668 * `except'.
1669 */
1670bool
1671mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1672 enum mlxsw_sp_l3proto ul_proto,
1673 union mlxsw_sp_l3addr saddr,
1674 u32 ul_tb_id,
1675 const struct mlxsw_sp_ipip_entry *except)
1676{
1677 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1678
1679 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1680 ipip_list_node) {
1681 if (ipip_entry != except &&
1682 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1683 ul_tb_id, ipip_entry)) {
1684 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1685 return true;
1686 }
1687 }
1688
1689 return false;
1690}
1691
Petr Machata61481f22017-11-03 10:03:41 +01001692static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1693 struct net_device *ul_dev)
1694{
1695 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1696
1697 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1698 ipip_list_node) {
1699 struct net_device *ipip_ul_dev =
1700 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1701
1702 if (ipip_ul_dev == ul_dev)
1703 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1704 }
1705}
1706
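/* Dispatch netdevice events for an overlay (tunnel) device to the handlers
 * above. Events that have no bearing on offloading are ignored.
 */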
Petr Machata7e75af62017-11-03 10:03:36 +01001707int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1708 struct net_device *ol_dev,
1709 unsigned long event,
1710 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001711{
Petr Machata7e75af62017-11-03 10:03:36 +01001712 struct netdev_notifier_changeupper_info *chup;
1713 struct netlink_ext_ack *extack;
1714
Petr Machata00635872017-10-16 16:26:37 +02001715 switch (event) {
1716 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001717 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001718 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001719 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001720 return 0;
1721 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001722 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1723 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001724 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001725 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001726 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001727 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001728 chup = container_of(info, typeof(*chup), info);
1729 extack = info->extack;
1730 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001731 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001732 ol_dev,
1733 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001734 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001735 case NETDEV_CHANGE:
1736 extack = info->extack;
1737 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1738 ol_dev, extack);
Petr Machata68c3cd92018-03-22 19:53:35 +02001739 case NETDEV_CHANGEMTU:
1740 return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001741 }
1742 return 0;
1743}
1744
Petr Machata61481f22017-11-03 10:03:41 +01001745static int
1746__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1747 struct mlxsw_sp_ipip_entry *ipip_entry,
1748 struct net_device *ul_dev,
1749 unsigned long event,
1750 struct netdev_notifier_info *info)
1751{
1752 struct netdev_notifier_changeupper_info *chup;
1753 struct netlink_ext_ack *extack;
1754
1755 switch (event) {
1756 case NETDEV_CHANGEUPPER:
1757 chup = container_of(info, typeof(*chup), info);
1758 extack = info->extack;
1759 if (netif_is_l3_master(chup->upper_dev))
1760 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1761 ipip_entry,
1762 ul_dev,
1763 extack);
1764 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001765
1766 case NETDEV_UP:
1767 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1768 ul_dev);
1769 case NETDEV_DOWN:
1770 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1771 ipip_entry,
1772 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001773 }
1774 return 0;
1775}
1776
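/* Dispatch netdevice events for an underlay device. The same netdevice can
 * serve as underlay for several tunnels, so handle the event for each
 * matching IPIP entry. If handling fails for one of them, all tunnels using
 * this underlay are demoted.
 */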
1777int
1778mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1779 struct net_device *ul_dev,
1780 unsigned long event,
1781 struct netdev_notifier_info *info)
1782{
1783 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1784 int err;
1785
1786 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1787 ul_dev,
1788 ipip_entry))) {
1789 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1790 ul_dev, event, info);
1791 if (err) {
1792 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1793 ul_dev);
1794 return err;
1795 }
1796 }
1797
1798 return 0;
1799}
1800
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001801struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001802 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001803};
1804
1805struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001806 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001807 struct rhash_head ht_node;
1808 struct mlxsw_sp_neigh_key key;
1809 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001810 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001811 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001812 struct list_head nexthop_list; /* list of nexthops using
1813 * this neigh entry
1814 */
Yotam Gigib2157142016-07-05 11:27:51 +02001815 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001816 unsigned int counter_index;
1817 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001818};
1819
1820static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1821 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1822 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1823 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1824};
1825
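/* Iterate over the neighbour entries of a RIF: pass NULL to get the first
 * entry and a previous return value to get the following one. NULL is
 * returned past the end of the list.
 */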
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001826struct mlxsw_sp_neigh_entry *
1827mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1828 struct mlxsw_sp_neigh_entry *neigh_entry)
1829{
1830 if (!neigh_entry) {
1831 if (list_empty(&rif->neigh_list))
1832 return NULL;
1833 else
1834 return list_first_entry(&rif->neigh_list,
1835 typeof(*neigh_entry),
1836 rif_list_node);
1837 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001838 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001839 return NULL;
1840 return list_next_entry(neigh_entry, rif_list_node);
1841}
1842
1843int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1844{
1845 return neigh_entry->key.n->tbl->family;
1846}
1847
1848unsigned char *
1849mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1850{
1851 return neigh_entry->ha;
1852}
1853
1854u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1855{
1856 struct neighbour *n;
1857
1858 n = neigh_entry->key.n;
1859 return ntohl(*((__be32 *) n->primary_key));
1860}
1861
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001862struct in6_addr *
1863mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1864{
1865 struct neighbour *n;
1866
1867 n = neigh_entry->key.n;
1868 return (struct in6_addr *) &n->primary_key;
1869}
1870
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001871int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1872 struct mlxsw_sp_neigh_entry *neigh_entry,
1873 u64 *p_counter)
1874{
1875 if (!neigh_entry->counter_valid)
1876 return -EINVAL;
1877
1878 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1879 p_counter, NULL);
1880}
1881
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001882static struct mlxsw_sp_neigh_entry *
1883mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1884 u16 rif)
1885{
1886 struct mlxsw_sp_neigh_entry *neigh_entry;
1887
1888 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1889 if (!neigh_entry)
1890 return NULL;
1891
1892 neigh_entry->key.n = n;
1893 neigh_entry->rif = rif;
1894 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1895
1896 return neigh_entry;
1897}
1898
1899static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1900{
1901 kfree(neigh_entry);
1902}
1903
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001904static int
1905mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1906 struct mlxsw_sp_neigh_entry *neigh_entry)
1907{
Ido Schimmel9011b672017-05-16 19:38:25 +02001908 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001909 &neigh_entry->ht_node,
1910 mlxsw_sp_neigh_ht_params);
1911}
1912
1913static void
1914mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1915 struct mlxsw_sp_neigh_entry *neigh_entry)
1916{
Ido Schimmel9011b672017-05-16 19:38:25 +02001917 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001918 &neigh_entry->ht_node,
1919 mlxsw_sp_neigh_ht_params);
1920}
1921
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001922static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001923mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1924 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001925{
1926 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001927 const char *table_name;
1928
1929 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1930 case AF_INET:
1931 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1932 break;
1933 case AF_INET6:
1934 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1935 break;
1936 default:
1937 WARN_ON(1);
1938 return false;
1939 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001940
1941 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001942 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001943}
1944
1945static void
1946mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1947 struct mlxsw_sp_neigh_entry *neigh_entry)
1948{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001949 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001950 return;
1951
1952 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1953 return;
1954
1955 neigh_entry->counter_valid = true;
1956}
1957
1958static void
1959mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1960 struct mlxsw_sp_neigh_entry *neigh_entry)
1961{
1962 if (!neigh_entry->counter_valid)
1963 return;
1964 mlxsw_sp_flow_counter_free(mlxsw_sp,
1965 neigh_entry->counter_index);
1966 neigh_entry->counter_valid = false;
1967}
1968
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001969static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001970mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001971{
1972 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001973 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001974 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001975
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001976 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1977 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001978 return ERR_PTR(-EINVAL);
1979
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001980 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001981 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001982 return ERR_PTR(-ENOMEM);
1983
1984 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1985 if (err)
1986 goto err_neigh_entry_insert;
1987
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001988 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001989 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001990
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001991 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001992
1993err_neigh_entry_insert:
1994 mlxsw_sp_neigh_entry_free(neigh_entry);
1995 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001996}
1997
1998static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001999mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2000 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002001{
Ido Schimmel9665b742017-02-08 11:16:42 +01002002 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002003 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002004 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2005 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002006}
2007
2008static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01002009mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002010{
Jiri Pirko33b13412016-11-10 12:31:04 +01002011 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002012
Jiri Pirko33b13412016-11-10 12:31:04 +01002013 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02002014 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002015 &key, mlxsw_sp_neigh_ht_params);
2016}
2017
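/* The polling interval for neighbour activity dumps tracks the kernel's
 * DELAY_PROBE_TIME. With IPv6 enabled, the shorter of the ARP and ND values
 * is used.
 */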
Yotam Gigic723c7352016-07-05 11:27:43 +02002018static void
2019mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2020{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02002021 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002022
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002023#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02002024 interval = min_t(unsigned long,
2025 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2026 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002027#else
2028 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2029#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02002030 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02002031}
2032
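/* For each entry of a RAUHTD dump record, find the matching kernel neighbour
 * and nudge it with neigh_event_send(), so that the kernel treats it as
 * active.
 */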
2033static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2034 char *rauhtd_pl,
2035 int ent_index)
2036{
2037 struct net_device *dev;
2038 struct neighbour *n;
2039 __be32 dipn;
2040 u32 dip;
2041 u16 rif;
2042
2043 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2044
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002045 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02002046 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2047 return;
2048 }
2049
2050 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002051 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02002052 n = neigh_lookup(&arp_tbl, &dipn, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002053 if (!n)
Yotam Gigic723c7352016-07-05 11:27:43 +02002054 return;
Yotam Gigic723c7352016-07-05 11:27:43 +02002055
2056 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2057 neigh_event_send(n, NULL);
2058 neigh_release(n);
2059}
2060
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02002061#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002062static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2063 char *rauhtd_pl,
2064 int rec_index)
2065{
2066 struct net_device *dev;
2067 struct neighbour *n;
2068 struct in6_addr dip;
2069 u16 rif;
2070
2071 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2072 (char *) &dip);
2073
2074 if (!mlxsw_sp->router->rifs[rif]) {
2075 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2076 return;
2077 }
2078
2079 dev = mlxsw_sp->router->rifs[rif]->dev;
2080 n = neigh_lookup(&nd_tbl, &dip, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002081 if (!n)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002082 return;
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002083
2084 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2085 neigh_event_send(n, NULL);
2086 neigh_release(n);
2087}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002088#else
2089static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2090 char *rauhtd_pl,
2091 int rec_index)
2092{
2093}
2094#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002095
Yotam Gigic723c7352016-07-05 11:27:43 +02002096static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2097 char *rauhtd_pl,
2098 int rec_index)
2099{
2100 u8 num_entries;
2101 int i;
2102
2103 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2104 rec_index);
2105 /* Hardware starts counting at 0, so add 1. */
2106 num_entries++;
2107
2108 /* Each record consists of several neighbour entries. */
2109 for (i = 0; i < num_entries; i++) {
2110 int ent_index;
2111
2112 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2113 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2114 ent_index);
2115 }
2116
2117}
2118
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002119static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2120 char *rauhtd_pl,
2121 int rec_index)
2122{
2123 /* One record contains one entry. */
2124 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2125 rec_index);
2126}
2127
Yotam Gigic723c7352016-07-05 11:27:43 +02002128static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2129 char *rauhtd_pl, int rec_index)
2130{
2131 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2132 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2133 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2134 rec_index);
2135 break;
2136 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002137 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2138 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002139 break;
2140 }
2141}
2142
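/* Heuristic for deciding whether the RAUHTD response filled the register
 * completely, in which case another query is needed to retrieve the rest of
 * the dump.
 */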
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002143static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2144{
2145 u8 num_rec, last_rec_index, num_entries;
2146
2147 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2148 last_rec_index = num_rec - 1;
2149
2150 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2151 return false;
2152 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2153 MLXSW_REG_RAUHTD_TYPE_IPV6)
2154 return true;
2155
2156 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2157 last_rec_index);
2158 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2159 return true;
2160 return false;
2161}
2162
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002163static int
2164__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2165 char *rauhtd_pl,
2166 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002167{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002168 int i, num_rec;
2169 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002170
2171 /* Make sure the neighbour's netdev isn't removed in the
2172 * process.
2173 */
2174 rtnl_lock();
2175 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002176 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002177 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2178 rauhtd_pl);
2179 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002180 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002181 break;
2182 }
2183 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2184 for (i = 0; i < num_rec; i++)
2185 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2186 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002187 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002188 rtnl_unlock();
2189
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002190 return err;
2191}
2192
2193static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2194{
2195 enum mlxsw_reg_rauhtd_type type;
2196 char *rauhtd_pl;
2197 int err;
2198
2199 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2200 if (!rauhtd_pl)
2201 return -ENOMEM;
2202
2203 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2204 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2205 if (err)
2206 goto out;
2207
2208 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2209 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2210out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002211 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002212 return err;
2213}
2214
2215static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2216{
2217 struct mlxsw_sp_neigh_entry *neigh_entry;
2218
2219	/* Take the RTNL mutex here to prevent the lists from changing */
2220 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002221 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002222 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002223		/* If this neigh has nexthops, make the kernel think this neigh
2224 * is active regardless of the traffic.
2225 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002226 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002227 rtnl_unlock();
2228}
2229
2230static void
2231mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2232{
Ido Schimmel9011b672017-05-16 19:38:25 +02002233 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002234
Ido Schimmel9011b672017-05-16 19:38:25 +02002235 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002236 msecs_to_jiffies(interval));
2237}
2238
2239static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2240{
Ido Schimmel9011b672017-05-16 19:38:25 +02002241 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002242 int err;
2243
Ido Schimmel9011b672017-05-16 19:38:25 +02002244 router = container_of(work, struct mlxsw_sp_router,
2245 neighs_update.dw.work);
2246 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002247 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002248 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002249
Ido Schimmel9011b672017-05-16 19:38:25 +02002250 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002251
Ido Schimmel9011b672017-05-16 19:38:25 +02002252 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002253}
2254
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002255static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2256{
2257 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002258 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002259
Ido Schimmel9011b672017-05-16 19:38:25 +02002260 router = container_of(work, struct mlxsw_sp_router,
2261 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002262	/* Iterate over nexthop neighbours, find those that are unresolved and
2263	 * send ARP on them. This solves the chicken-and-egg problem where a
2264	 * nexthop would not get offloaded until its neighbour is resolved, but
2265	 * the neighbour would never get resolved as long as traffic is flowing
2266	 * in HW using a different nexthop.
2267	 *
2268	 * Take the RTNL mutex here to prevent the lists from changing.
2269 */
2270 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002271 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002272 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002273 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002274 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002275 rtnl_unlock();
2276
Ido Schimmel9011b672017-05-16 19:38:25 +02002277 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002278 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2279}
2280
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002281static void
2282mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2283 struct mlxsw_sp_neigh_entry *neigh_entry,
2284 bool removing);
2285
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002286static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002287{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002288 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2289 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2290}
2291
2292static void
2293mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2294 struct mlxsw_sp_neigh_entry *neigh_entry,
2295 enum mlxsw_reg_rauht_op op)
2296{
Jiri Pirko33b13412016-11-10 12:31:04 +01002297 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002298 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002299 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002300
2301 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2302 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002303 if (neigh_entry->counter_valid)
2304 mlxsw_reg_rauht_pack_counter(rauht_pl,
2305 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002306 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2307}
2308
2309static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002310mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2311 struct mlxsw_sp_neigh_entry *neigh_entry,
2312 enum mlxsw_reg_rauht_op op)
2313{
2314 struct neighbour *n = neigh_entry->key.n;
2315 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2316 const char *dip = n->primary_key;
2317
2318 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2319 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002320 if (neigh_entry->counter_valid)
2321 mlxsw_reg_rauht_pack_counter(rauht_pl,
2322 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002323 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2324}
2325
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002326bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002327{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002328 struct neighbour *n = neigh_entry->key.n;
2329
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002330 /* Packets with a link-local destination address are trapped
2331 * after LPM lookup and never reach the neighbour table, so
2332 * there is no need to program such neighbours to the device.
2333 */
2334 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2335 IPV6_ADDR_LINKLOCAL)
2336 return true;
2337 return false;
2338}
2339
2340static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002341mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2342 struct mlxsw_sp_neigh_entry *neigh_entry,
2343 bool adding)
2344{
2345 if (!adding && !neigh_entry->connected)
2346 return;
2347 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002348 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002349 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2350 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002351 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002352 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002353 return;
2354 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2355 mlxsw_sp_rauht_op(adding));
2356 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002357 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002358 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002359}
2360
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002361void
2362mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2363 struct mlxsw_sp_neigh_entry *neigh_entry,
2364 bool adding)
2365{
2366 if (adding)
2367 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2368 else
2369 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2370 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2371}
2372
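/* Netevent notifiers run in atomic context, so the actual processing is
 * deferred to work items of this type, which run in process context and may
 * take the RTNL mutex.
 */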
Ido Schimmelceb88812017-11-02 17:14:07 +01002373struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002374 struct work_struct work;
2375 struct mlxsw_sp *mlxsw_sp;
2376 struct neighbour *n;
2377};
2378
2379static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2380{
Ido Schimmelceb88812017-11-02 17:14:07 +01002381 struct mlxsw_sp_netevent_work *net_work =
2382 container_of(work, struct mlxsw_sp_netevent_work, work);
2383 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002384 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002385 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002386 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002387 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002388 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002389
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002390 /* If these parameters are changed after we release the lock,
2391 * then we are guaranteed to receive another event letting us
2392 * know about it.
2393 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002394 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002395 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002396 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002397 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002398 read_unlock_bh(&n->lock);
2399
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002400 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01002401 mlxsw_sp_span_respin(mlxsw_sp);
2402
Ido Schimmel93a87e52016-12-23 09:32:49 +01002403 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002404 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2405 if (!entry_connected && !neigh_entry)
2406 goto out;
2407 if (!neigh_entry) {
2408 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2409 if (IS_ERR(neigh_entry))
2410 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002411 }
2412
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002413 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2414 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2415 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2416
2417 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2418 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2419
2420out:
2421 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002422 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002423 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002424}
2425
Ido Schimmel28678f02017-11-02 17:14:10 +01002426static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2427
2428static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2429{
2430 struct mlxsw_sp_netevent_work *net_work =
2431 container_of(work, struct mlxsw_sp_netevent_work, work);
2432 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2433
2434 mlxsw_sp_mp_hash_init(mlxsw_sp);
2435 kfree(net_work);
2436}
2437
2438static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002439 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002440{
Ido Schimmelceb88812017-11-02 17:14:07 +01002441 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002442 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002443 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002444 struct mlxsw_sp *mlxsw_sp;
2445 unsigned long interval;
2446 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002447 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002448 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002449
2450 switch (event) {
2451 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2452 p = ptr;
2453
2454 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002455 if (!p->dev || (p->tbl->family != AF_INET &&
2456 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002457 return NOTIFY_DONE;
2458
2459 /* We are in atomic context and can't take RTNL mutex,
2460 * so use RCU variant to walk the device chain.
2461 */
2462 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2463 if (!mlxsw_sp_port)
2464 return NOTIFY_DONE;
2465
2466 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2467 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002468 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002469
2470 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2471 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002472 case NETEVENT_NEIGH_UPDATE:
2473 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002474
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002475 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002476 return NOTIFY_DONE;
2477
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002478 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002479 if (!mlxsw_sp_port)
2480 return NOTIFY_DONE;
2481
Ido Schimmelceb88812017-11-02 17:14:07 +01002482 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2483 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002484 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002485 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002486 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002487
Ido Schimmelceb88812017-11-02 17:14:07 +01002488 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2489 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2490 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002491
2492		/* Take a reference to ensure the neighbour won't be
2493		 * destroyed until we drop the reference in the work
2494		 * handler.
2495 */
2496 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002497 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002498 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002499 break;
David Ahern3192dac2018-03-02 08:32:16 -08002500 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
David Ahern5e18b9c552018-03-02 08:32:19 -08002501 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
Ido Schimmel28678f02017-11-02 17:14:10 +01002502 net = ptr;
2503
2504 if (!net_eq(net, &init_net))
2505 return NOTIFY_DONE;
2506
2507 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2508 if (!net_work)
2509 return NOTIFY_BAD;
2510
2511 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2512 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2513 net_work->mlxsw_sp = router->mlxsw_sp;
2514 mlxsw_core_schedule_work(&net_work->work);
2515 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002516 }
2517
2518 return NOTIFY_DONE;
2519}
2520
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002521static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2522{
Yotam Gigic723c7352016-07-05 11:27:43 +02002523 int err;
2524
Ido Schimmel9011b672017-05-16 19:38:25 +02002525 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002526 &mlxsw_sp_neigh_ht_params);
2527 if (err)
2528 return err;
2529
2530 /* Initialize the polling interval according to the default
2531 * table.
2532 */
2533 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2534
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002535	/* Create the delayed works for the activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002536 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002537 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002538 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002539 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002540 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2541 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002542 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002543}
2544
2545static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2546{
Ido Schimmel9011b672017-05-16 19:38:25 +02002547 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2548 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2549 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002550}
2551
Ido Schimmel9665b742017-02-08 11:16:42 +01002552static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002553 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002554{
2555 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2556
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002557 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002558 rif_list_node) {
2559 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002560 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002561 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002562}
2563
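/* A nexthop either resolves over an Ethernet RIF through a neighbour entry,
 * or over an IPIP tunnel through an IPIP entry.
 */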
Petr Machata35225e42017-09-02 23:49:22 +02002564enum mlxsw_sp_nexthop_type {
2565 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002566 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002567};
2568
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002569struct mlxsw_sp_nexthop_key {
2570 struct fib_nh *fib_nh;
2571};
2572
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002573struct mlxsw_sp_nexthop {
2574 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002575 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002576 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002577 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2578 * this belongs to
2579 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002580 struct rhash_head ht_node;
2581 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002582 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002583 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002584 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002585 int norm_nh_weight;
2586 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002587 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002588 u8 should_offload:1, /* set indicates this neigh is connected and
2589 * should be put to KVD linear area of this group.
2590 */
2591 offloaded:1, /* set in case the neigh is actually put into
2592 * KVD linear area of this group.
2593 */
2594 update:1; /* set indicates that MAC of this neigh should be
2595 * updated in HW
2596 */
Petr Machata35225e42017-09-02 23:49:22 +02002597 enum mlxsw_sp_nexthop_type type;
2598 union {
2599 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002600 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002601 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002602 unsigned int counter_index;
2603 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002604};
2605
2606struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002607 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002608 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002609 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002610 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002611 u8 adj_index_valid:1,
2612 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002613 u32 adj_index;
2614 u16 ecmp_size;
2615 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002616 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002617 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002618#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002619};
2620
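/* A flow counter is bound to a nexthop only when counters are enabled for
 * the adjacency dpipe table via devlink. Otherwise the nexthop is left
 * without a counter and mlxsw_sp_nexthop_counter_get() returns -EINVAL for
 * it.
 */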
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002621void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2622 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002623{
2624 struct devlink *devlink;
2625
2626 devlink = priv_to_devlink(mlxsw_sp->core);
2627 if (!devlink_dpipe_table_counter_enabled(devlink,
2628 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2629 return;
2630
2631 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2632 return;
2633
2634 nh->counter_valid = true;
2635}
2636
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002637void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2638 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002639{
2640 if (!nh->counter_valid)
2641 return;
2642 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2643 nh->counter_valid = false;
2644}
2645
2646int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2647 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2648{
2649 if (!nh->counter_valid)
2650 return -EINVAL;
2651
2652 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2653 p_counter, NULL);
2654}
2655
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002656struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2657 struct mlxsw_sp_nexthop *nh)
2658{
2659 if (!nh) {
2660 if (list_empty(&router->nexthop_list))
2661 return NULL;
2662 else
2663 return list_first_entry(&router->nexthop_list,
2664 typeof(*nh), router_list_node);
2665 }
2666 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2667 return NULL;
2668 return list_next_entry(nh, router_list_node);
2669}
2670
2671bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2672{
2673 return nh->offloaded;
2674}
2675
2676unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2677{
2678 if (!nh->offloaded)
2679 return NULL;
2680 return nh->neigh_entry->ha;
2681}
2682
2683int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002684 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002685{
2686 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2687 u32 adj_hash_index = 0;
2688 int i;
2689
2690 if (!nh->offloaded || !nh_grp->adj_index_valid)
2691 return -EINVAL;
2692
2693 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002694 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002695
2696 for (i = 0; i < nh_grp->count; i++) {
2697 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2698
2699 if (nh_iter == nh)
2700 break;
2701 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002702 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002703 }
2704
2705 *p_adj_hash_index = adj_hash_index;
2706 return 0;
2707}
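/* The adjacency ("hash") index reported by mlxsw_sp_nexthop_indexes()
 * is the nexthop's offset within the group's block of adjacency
 * entries: the sum of num_adj_entries of every offloaded nexthop that
 * precedes it in the group. A consumer can therefore find the entries
 * owned by a single nexthop at
 * [adj_index + adj_hash_index, adj_index + adj_hash_index + num_adj_entries).
 */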
2708
2709struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2710{
2711 return nh->rif;
2712}
2713
2714bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2715{
2716 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2717 int i;
2718
2719 for (i = 0; i < nh_grp->count; i++) {
2720 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2721
2722 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2723 return true;
2724 }
2725 return false;
2726}
2727
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002728static struct fib_info *
2729mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2730{
2731 return nh_grp->priv;
2732}
2733
2734struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002735 enum mlxsw_sp_l3proto proto;
2736 union {
2737 struct fib_info *fi;
2738 struct mlxsw_sp_fib6_entry *fib6_entry;
2739 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002740};
2741
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002742static bool
2743mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002744 const struct in6_addr *gw, int ifindex,
2745 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002746{
2747 int i;
2748
2749 for (i = 0; i < nh_grp->count; i++) {
2750 const struct mlxsw_sp_nexthop *nh;
2751
2752 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002753 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002754 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2755 return true;
2756 }
2757
2758 return false;
2759}
2760
2761static bool
2762mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2763 const struct mlxsw_sp_fib6_entry *fib6_entry)
2764{
2765 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2766
2767 if (nh_grp->count != fib6_entry->nrt6)
2768 return false;
2769
2770 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2771 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002772 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002773
David Ahern5e670d82018-04-17 17:33:14 -07002774 ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
2775 weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
2776 gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002777 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2778 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002779 return false;
2780 }
2781
2782 return true;
2783}
2784
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002785static int
2786mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2787{
2788 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2789 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2790
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002791 switch (cmp_arg->proto) {
2792 case MLXSW_SP_L3_PROTO_IPV4:
2793 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2794 case MLXSW_SP_L3_PROTO_IPV6:
2795 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2796 cmp_arg->fib6_entry);
2797 default:
2798 WARN_ON(1);
2799 return 1;
2800 }
2801}
2802
2803static int
2804mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2805{
2806 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002807}
2808
2809static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2810{
2811 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002812 const struct mlxsw_sp_nexthop *nh;
2813 struct fib_info *fi;
2814 unsigned int val;
2815 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002816
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002817 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2818 case AF_INET:
2819 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2820 return jhash(&fi, sizeof(fi), seed);
2821 case AF_INET6:
2822 val = nh_grp->count;
2823 for (i = 0; i < nh_grp->count; i++) {
2824 nh = &nh_grp->nexthops[i];
2825 val ^= nh->ifindex;
2826 }
2827 return jhash(&val, sizeof(val), seed);
2828 default:
2829 WARN_ON(1);
2830 return 0;
2831 }
2832}
2833
2834static u32
2835mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2836{
2837 unsigned int val = fib6_entry->nrt6;
2838 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2839 struct net_device *dev;
2840
2841 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern5e670d82018-04-17 17:33:14 -07002842 dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002843 val ^= dev->ifindex;
2844 }
2845
2846 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002847}
2848
2849static u32
2850mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2851{
2852 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2853
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002854 switch (cmp_arg->proto) {
2855 case MLXSW_SP_L3_PROTO_IPV4:
2856 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2857 case MLXSW_SP_L3_PROTO_IPV6:
2858 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2859 default:
2860 WARN_ON(1);
2861 return 0;
2862 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002863}
2864
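/* Nexthop groups live in a single rhashtable, but the lookup key
 * differs per protocol: an IPv4 group is identified by its struct
 * fib_info pointer, while an IPv6 group is matched by comparing its
 * (ifindex, weight, gateway) tuples against the FIB entry's rt6 list
 * (see mlxsw_sp_nexthop6_group_cmp() above). This is what allows
 * routes with identical nexthops to share one adjacency group.
 */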
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002865static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002866 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002867 .hashfn = mlxsw_sp_nexthop_group_hash,
2868 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2869 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002870};
2871
2872static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2873 struct mlxsw_sp_nexthop_group *nh_grp)
2874{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002875 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2876 !nh_grp->gateway)
2877 return 0;
2878
Ido Schimmel9011b672017-05-16 19:38:25 +02002879 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002880 &nh_grp->ht_node,
2881 mlxsw_sp_nexthop_group_ht_params);
2882}
2883
2884static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2885 struct mlxsw_sp_nexthop_group *nh_grp)
2886{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002887 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2888 !nh_grp->gateway)
2889 return;
2890
Ido Schimmel9011b672017-05-16 19:38:25 +02002891 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002892 &nh_grp->ht_node,
2893 mlxsw_sp_nexthop_group_ht_params);
2894}
2895
2896static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002897mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2898 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002899{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002900 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2901
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002902 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002903 cmp_arg.fi = fi;
2904 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2905 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002906 mlxsw_sp_nexthop_group_ht_params);
2907}
2908
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002909static struct mlxsw_sp_nexthop_group *
2910mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2911 struct mlxsw_sp_fib6_entry *fib6_entry)
2912{
2913 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2914
2915 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2916 cmp_arg.fib6_entry = fib6_entry;
2917 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2918 &cmp_arg,
2919 mlxsw_sp_nexthop_group_ht_params);
2920}
2921
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002922static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2923 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2924 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2925 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2926};
2927
2928static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2929 struct mlxsw_sp_nexthop *nh)
2930{
Ido Schimmel9011b672017-05-16 19:38:25 +02002931 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002932 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2933}
2934
2935static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2936 struct mlxsw_sp_nexthop *nh)
2937{
Ido Schimmel9011b672017-05-16 19:38:25 +02002938 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002939 mlxsw_sp_nexthop_ht_params);
2940}
2941
Ido Schimmelad178c82017-02-08 11:16:40 +01002942static struct mlxsw_sp_nexthop *
2943mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2944 struct mlxsw_sp_nexthop_key key)
2945{
Ido Schimmel9011b672017-05-16 19:38:25 +02002946 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002947 mlxsw_sp_nexthop_ht_params);
2948}
2949
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002950static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002951 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002952 u32 adj_index, u16 ecmp_size,
2953 u32 new_adj_index,
2954 u16 new_ecmp_size)
2955{
2956 char raleu_pl[MLXSW_REG_RALEU_LEN];
2957
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002958 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002959 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2960 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002961 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002962 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2963}
2964
2965static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2966 struct mlxsw_sp_nexthop_group *nh_grp,
2967 u32 old_adj_index, u16 old_ecmp_size)
2968{
2969 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002970 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002971 int err;
2972
2973 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002974 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002975 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002976 fib = fib_entry->fib_node->fib;
2977 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002978 old_adj_index,
2979 old_ecmp_size,
2980 nh_grp->adj_index,
2981 nh_grp->ecmp_size);
2982 if (err)
2983 return err;
2984 }
2985 return 0;
2986}
2987
Ido Schimmeleb789982017-10-22 23:11:48 +02002988static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2989 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002990{
2991 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2992 char ratr_pl[MLXSW_REG_RATR_LEN];
2993
2994 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002995 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2996 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002997 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002998 if (nh->counter_valid)
2999 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3000 else
3001 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3002
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003003 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3004}
3005
Ido Schimmeleb789982017-10-22 23:11:48 +02003006int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3007 struct mlxsw_sp_nexthop *nh)
3008{
3009 int i;
3010
3011 for (i = 0; i < nh->num_adj_entries; i++) {
3012 int err;
3013
3014 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3015 if (err)
3016 return err;
3017 }
3018
3019 return 0;
3020}
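/* A single nexthop may own several consecutive adjacency entries
 * (nh->num_adj_entries), all programmed with the same neighbour; this
 * is how unequal nexthop weights are realized in the device. The
 * update helpers above and below simply rewrite every entry in the
 * nexthop's range.
 */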
3021
3022static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3023 u32 adj_index,
3024 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02003025{
3026 const struct mlxsw_sp_ipip_ops *ipip_ops;
3027
3028 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3029 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3030}
3031
Ido Schimmeleb789982017-10-22 23:11:48 +02003032static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3033 u32 adj_index,
3034 struct mlxsw_sp_nexthop *nh)
3035{
3036 int i;
3037
3038 for (i = 0; i < nh->num_adj_entries; i++) {
3039 int err;
3040
3041 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3042 nh);
3043 if (err)
3044 return err;
3045 }
3046
3047 return 0;
3048}
3049
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003050static int
Petr Machata35225e42017-09-02 23:49:22 +02003051mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3052 struct mlxsw_sp_nexthop_group *nh_grp,
3053 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003054{
3055 u32 adj_index = nh_grp->adj_index; /* base */
3056 struct mlxsw_sp_nexthop *nh;
3057 int i;
3058 int err;
3059
3060 for (i = 0; i < nh_grp->count; i++) {
3061 nh = &nh_grp->nexthops[i];
3062
3063 if (!nh->should_offload) {
3064 nh->offloaded = 0;
3065 continue;
3066 }
3067
Ido Schimmela59b7e02017-01-23 11:11:42 +01003068 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02003069 switch (nh->type) {
3070 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003071 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003072 (mlxsw_sp, adj_index, nh);
3073 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003074 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3075 err = mlxsw_sp_nexthop_ipip_update
3076 (mlxsw_sp, adj_index, nh);
3077 break;
Petr Machata35225e42017-09-02 23:49:22 +02003078 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003079 if (err)
3080 return err;
3081 nh->update = 0;
3082 nh->offloaded = 1;
3083 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003084 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003085 }
3086 return 0;
3087}
3088
Ido Schimmel1819ae32017-07-21 18:04:28 +02003089static bool
3090mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3091 const struct mlxsw_sp_fib_entry *fib_entry);
3092
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003093static int
3094mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3095 struct mlxsw_sp_nexthop_group *nh_grp)
3096{
3097 struct mlxsw_sp_fib_entry *fib_entry;
3098 int err;
3099
3100 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02003101 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3102 fib_entry))
3103 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003104 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3105 if (err)
3106 return err;
3107 }
3108 return 0;
3109}
3110
3111static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003112mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3113 enum mlxsw_reg_ralue_op op, int err);
3114
3115static void
3116mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3117{
3118 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3119 struct mlxsw_sp_fib_entry *fib_entry;
3120
3121 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3122 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3123 fib_entry))
3124 continue;
3125 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3126 }
3127}
3128
Ido Schimmel425a08c2017-10-22 23:11:47 +02003129static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3130{
3131 /* Valid sizes for an adjacency group are:
3132 * 1-64, 512, 1024, 2048 and 4096.
3133 */
3134 if (*p_adj_grp_size <= 64)
3135 return;
3136 else if (*p_adj_grp_size <= 512)
3137 *p_adj_grp_size = 512;
3138 else if (*p_adj_grp_size <= 1024)
3139 *p_adj_grp_size = 1024;
3140 else if (*p_adj_grp_size <= 2048)
3141 *p_adj_grp_size = 2048;
3142 else
3143 *p_adj_grp_size = 4096;
3144}
3145
3146static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3147 unsigned int alloc_size)
3148{
3149 if (alloc_size >= 4096)
3150 *p_adj_grp_size = 4096;
3151 else if (alloc_size >= 2048)
3152 *p_adj_grp_size = 2048;
3153 else if (alloc_size >= 1024)
3154 *p_adj_grp_size = 1024;
3155 else if (alloc_size >= 512)
3156 *p_adj_grp_size = 512;
3157}
3158
3159static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3160 u16 *p_adj_grp_size)
3161{
3162 unsigned int alloc_size;
3163 int err;
3164
3165 /* Round up the requested group size to the next size supported
3166 * by the device and make sure the request can be satisfied.
3167 */
3168 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3169 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3170 &alloc_size);
3171 if (err)
3172 return err;
3173 /* It is possible the allocation results in more allocated
3174 * entries than requested. Try to use as many of them as
3175 * possible.
3176 */
3177 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3178
3179 return 0;
3180}
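/* A worked example with hypothetical numbers: a group needing 72
 * adjacency entries is first rounded up to 512, the next size the
 * device supports. If the KVD linear allocator then reports that a
 * 512-entry request would in fact yield a 1024-entry allocation, the
 * group size is bumped to 1024, the largest valid size that still
 * fits within the allocation, so the surplus entries are not wasted.
 */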
3181
Ido Schimmel77d964e2017-08-02 09:56:05 +02003182static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003183mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3184{
3185 int i, g = 0, sum_norm_weight = 0;
3186 struct mlxsw_sp_nexthop *nh;
3187
3188 for (i = 0; i < nh_grp->count; i++) {
3189 nh = &nh_grp->nexthops[i];
3190
3191 if (!nh->should_offload)
3192 continue;
3193 if (g > 0)
3194 g = gcd(nh->nh_weight, g);
3195 else
3196 g = nh->nh_weight;
3197 }
3198
3199 for (i = 0; i < nh_grp->count; i++) {
3200 nh = &nh_grp->nexthops[i];
3201
3202 if (!nh->should_offload)
3203 continue;
3204 nh->norm_nh_weight = nh->nh_weight / g;
3205 sum_norm_weight += nh->norm_nh_weight;
3206 }
3207
3208 nh_grp->sum_norm_weight = sum_norm_weight;
3209}
3210
3211static void
3212mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3213{
3214 int total = nh_grp->sum_norm_weight;
3215 u16 ecmp_size = nh_grp->ecmp_size;
3216 int i, weight = 0, lower_bound = 0;
3217
3218 for (i = 0; i < nh_grp->count; i++) {
3219 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3220 int upper_bound;
3221
3222 if (!nh->should_offload)
3223 continue;
3224 weight += nh->norm_nh_weight;
3225 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3226 nh->num_adj_entries = upper_bound - lower_bound;
3227 lower_bound = upper_bound;
3228 }
3229}
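/* A worked example with hypothetical weights: a group with two
 * offloaded nexthops of weights 2 and 4 is normalized by their GCD to
 * weights 1 and 2, giving sum_norm_weight = 3. With an adjacency group
 * (ECMP) size of 3, the rebalance above assigns them 1 and 2 adjacency
 * entries respectively, so traffic is spread across the two nexthops
 * in a 1:2 ratio.
 */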
3230
3231static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003232mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3233 struct mlxsw_sp_nexthop_group *nh_grp)
3234{
Ido Schimmeleb789982017-10-22 23:11:48 +02003235 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003236 struct mlxsw_sp_nexthop *nh;
3237 bool offload_change = false;
3238 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003239 bool old_adj_index_valid;
3240 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003241 int i;
3242 int err;
3243
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003244 if (!nh_grp->gateway) {
3245 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3246 return;
3247 }
3248
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003249 for (i = 0; i < nh_grp->count; i++) {
3250 nh = &nh_grp->nexthops[i];
3251
Petr Machata56b8a9e2017-07-31 09:27:29 +02003252 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003253 offload_change = true;
3254 if (nh->should_offload)
3255 nh->update = 1;
3256 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003257 }
3258 if (!offload_change) {
3259 /* Nothing was added or removed, so no need to reallocate. Just
3260 * update MAC on existing adjacency indexes.
3261 */
Petr Machata35225e42017-09-02 23:49:22 +02003262 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003263 if (err) {
3264 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3265 goto set_trap;
3266 }
3267 return;
3268 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003269 mlxsw_sp_nexthop_group_normalize(nh_grp);
3270 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003271 /* No neigh of this group is connected so we just set
3272 * the trap and let everything flow through kernel.
3273 */
3274 goto set_trap;
3275
Ido Schimmeleb789982017-10-22 23:11:48 +02003276 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003277 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3278 if (err)
3279 /* No valid allocation size available. */
3280 goto set_trap;
3281
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003282 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3283 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003284 /* We ran out of KVD linear space, just set the
3285 * trap and let everything flow through kernel.
3286 */
3287 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3288 goto set_trap;
3289 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003290 old_adj_index_valid = nh_grp->adj_index_valid;
3291 old_adj_index = nh_grp->adj_index;
3292 old_ecmp_size = nh_grp->ecmp_size;
3293 nh_grp->adj_index_valid = 1;
3294 nh_grp->adj_index = adj_index;
3295 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003296 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003297 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003298 if (err) {
3299 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3300 goto set_trap;
3301 }
3302
3303 if (!old_adj_index_valid) {
3304 /* The trap was set for fib entries, so we have to call
3305 * fib entry update to unset it and use adjacency index.
3306 */
3307 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3308 if (err) {
3309 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3310 goto set_trap;
3311 }
3312 return;
3313 }
3314
3315 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3316 old_adj_index, old_ecmp_size);
3317 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3318 if (err) {
3319 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3320 goto set_trap;
3321 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003322
3323 /* Offload state within the group changed, so update the flags. */
3324 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3325
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003326 return;
3327
3328set_trap:
3329 old_adj_index_valid = nh_grp->adj_index_valid;
3330 nh_grp->adj_index_valid = 0;
3331 for (i = 0; i < nh_grp->count; i++) {
3332 nh = &nh_grp->nexthops[i];
3333 nh->offloaded = 0;
3334 }
3335 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3336 if (err)
3337 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3338 if (old_adj_index_valid)
3339 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3340}
3341
3342static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3343 bool removing)
3344{
Petr Machata213666a2017-07-31 09:27:30 +02003345 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003346 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003347 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003348 nh->should_offload = 0;
3349 nh->update = 1;
3350}
3351
3352static void
3353mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3354 struct mlxsw_sp_neigh_entry *neigh_entry,
3355 bool removing)
3356{
3357 struct mlxsw_sp_nexthop *nh;
3358
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3360 neigh_list_node) {
3361 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3362 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3363 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003364}
3365
Ido Schimmel9665b742017-02-08 11:16:42 +01003366static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003367 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003368{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003369 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003370 return;
3371
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003372 nh->rif = rif;
3373 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003374}
3375
3376static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3377{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003378 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003379 return;
3380
3381 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003382 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003383}
3384
Ido Schimmela8c97012017-02-08 11:16:35 +01003385static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3386 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003387{
3388 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003389 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003390 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003391 int err;
3392
Ido Schimmelad178c82017-02-08 11:16:40 +01003393 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003394 return 0;
3395
Jiri Pirko33b13412016-11-10 12:31:04 +01003396 /* Take a reference of neigh here ensuring that neigh would
Petr Machata8de3c172017-07-31 09:27:25 +02003397 * not be destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003398 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003399 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003400 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003401 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003402 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003403 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3404 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003405 if (IS_ERR(n))
3406 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003407 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003408 }
3409 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3410 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003411 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3412 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003413 err = -EINVAL;
3414 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003415 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003416 }
Yotam Gigib2157142016-07-05 11:27:51 +02003417
3418 /* If that is the first nexthop connected to that neigh, add to
3419 * nexthop_neighs_list
3420 */
3421 if (list_empty(&neigh_entry->nexthop_list))
3422 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003423 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003424
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003425 nh->neigh_entry = neigh_entry;
3426 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3427 read_lock_bh(&n->lock);
3428 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003429 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003430 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003431 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003432
3433 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003434
3435err_neigh_entry_create:
3436 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003437 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003438}
3439
Ido Schimmela8c97012017-02-08 11:16:35 +01003440static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3441 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003442{
3443 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003444 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003445
Ido Schimmelb8399a12017-02-08 11:16:33 +01003446 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003447 return;
3448 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003449
Ido Schimmel58312122016-12-23 09:32:50 +01003450 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003451 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003452 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003453
3454 /* If that is the last nexthop connected to that neigh, remove from
3455 * nexthop_neighs_list
3456 */
Ido Schimmele58be792017-02-08 11:16:28 +01003457 if (list_empty(&neigh_entry->nexthop_list))
3458 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003459
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003460 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3461 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3462
3463 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003464}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003465
Petr Machata44b0fff2017-11-03 10:03:44 +01003466static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3467{
3468 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3469
3470 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3471}
3472
Petr Machatad97cda52017-11-28 13:17:13 +01003473static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3474 struct mlxsw_sp_nexthop *nh,
3475 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003476{
Petr Machata44b0fff2017-11-03 10:03:44 +01003477 bool removing;
3478
Petr Machata1012b9a2017-09-02 23:49:23 +02003479 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003480 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003481
Petr Machatad97cda52017-11-28 13:17:13 +01003482 nh->ipip_entry = ipip_entry;
3483 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003484 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003485 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003486}
3487
3488static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3489 struct mlxsw_sp_nexthop *nh)
3490{
3491 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3492
3493 if (!ipip_entry)
3494 return;
3495
3496 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003497 nh->ipip_entry = NULL;
3498}
3499
3500static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3501 const struct fib_nh *fib_nh,
3502 enum mlxsw_sp_ipip_type *p_ipipt)
3503{
3504 struct net_device *dev = fib_nh->nh_dev;
3505
3506 return dev &&
3507 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3508 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3509}
3510
Petr Machata35225e42017-09-02 23:49:22 +02003511static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3512 struct mlxsw_sp_nexthop *nh)
3513{
3514 switch (nh->type) {
3515 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3516 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3517 mlxsw_sp_nexthop_rif_fini(nh);
3518 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003519 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003520 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003521 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3522 break;
Petr Machata35225e42017-09-02 23:49:22 +02003523 }
3524}
3525
3526static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3527 struct mlxsw_sp_nexthop *nh,
3528 struct fib_nh *fib_nh)
3529{
Petr Machatad97cda52017-11-28 13:17:13 +01003530 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003531 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003532 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003533 struct mlxsw_sp_rif *rif;
3534 int err;
3535
Petr Machatad97cda52017-11-28 13:17:13 +01003536 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3537 if (ipip_entry) {
3538 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3539 if (ipip_ops->can_offload(mlxsw_sp, dev,
3540 MLXSW_SP_L3_PROTO_IPV4)) {
3541 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3542 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3543 return 0;
3544 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003545 }
3546
Petr Machata35225e42017-09-02 23:49:22 +02003547 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3548 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3549 if (!rif)
3550 return 0;
3551
3552 mlxsw_sp_nexthop_rif_init(nh, rif);
3553 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3554 if (err)
3555 goto err_neigh_init;
3556
3557 return 0;
3558
3559err_neigh_init:
3560 mlxsw_sp_nexthop_rif_fini(nh);
3561 return err;
3562}
3563
3564static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3565 struct mlxsw_sp_nexthop *nh)
3566{
3567 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3568}
3569
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003570static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3571 struct mlxsw_sp_nexthop_group *nh_grp,
3572 struct mlxsw_sp_nexthop *nh,
3573 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003574{
3575 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003576 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003577 int err;
3578
3579 nh->nh_grp = nh_grp;
3580 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003581#ifdef CONFIG_IP_ROUTE_MULTIPATH
3582 nh->nh_weight = fib_nh->nh_weight;
3583#else
3584 nh->nh_weight = 1;
3585#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003586 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003587 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3588 if (err)
3589 return err;
3590
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003591 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003592 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3593
Ido Schimmel97989ee2017-03-10 08:53:38 +01003594 if (!dev)
3595 return 0;
3596
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003597 in_dev = __in_dev_get_rtnl(dev);
3598 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3599 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3600 return 0;
3601
Petr Machata35225e42017-09-02 23:49:22 +02003602 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003603 if (err)
3604 goto err_nexthop_neigh_init;
3605
3606 return 0;
3607
3608err_nexthop_neigh_init:
3609 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3610 return err;
3611}
3612
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003613static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3614 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003615{
Petr Machata35225e42017-09-02 23:49:22 +02003616 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003617 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003618 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003619 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003620}
3621
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003622static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3623 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003624{
3625 struct mlxsw_sp_nexthop_key key;
3626 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003627
Ido Schimmel9011b672017-05-16 19:38:25 +02003628 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003629 return;
3630
3631 key.fib_nh = fib_nh;
3632 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3633 if (WARN_ON_ONCE(!nh))
3634 return;
3635
Ido Schimmelad178c82017-02-08 11:16:40 +01003636 switch (event) {
3637 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003638 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003639 break;
3640 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003641 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003642 break;
3643 }
3644
3645 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3646}
3647
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003648static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3649 struct mlxsw_sp_rif *rif)
3650{
3651 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003652 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003653
3654 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003655 switch (nh->type) {
3656 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3657 removing = false;
3658 break;
3659 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3660 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3661 break;
3662 default:
3663 WARN_ON(1);
3664 continue;
3665 }
3666
3667 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003668 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3669 }
3670}
3671
Petr Machata09dbf622017-11-28 13:17:14 +01003672static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3673 struct mlxsw_sp_rif *old_rif,
3674 struct mlxsw_sp_rif *new_rif)
3675{
3676 struct mlxsw_sp_nexthop *nh;
3677
3678 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3679 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3680 nh->rif = new_rif;
3681 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3682}
3683
Ido Schimmel9665b742017-02-08 11:16:42 +01003684static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003685 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003686{
3687 struct mlxsw_sp_nexthop *nh, *tmp;
3688
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003689 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003690 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003691 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3692 }
3693}
3694
Petr Machata9b014512017-09-02 23:49:20 +02003695static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3696 const struct fib_info *fi)
3697{
Petr Machata1012b9a2017-09-02 23:49:23 +02003698 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3699 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003700}
3701
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003702static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003703mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003704{
3705 struct mlxsw_sp_nexthop_group *nh_grp;
3706 struct mlxsw_sp_nexthop *nh;
3707 struct fib_nh *fib_nh;
3708 size_t alloc_size;
3709 int i;
3710 int err;
3711
3712 alloc_size = sizeof(*nh_grp) +
3713 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3714 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3715 if (!nh_grp)
3716 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003717 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003718 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003719 nh_grp->neigh_tbl = &arp_tbl;
3720
Petr Machata9b014512017-09-02 23:49:20 +02003721 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003722 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003723 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003724 for (i = 0; i < nh_grp->count; i++) {
3725 nh = &nh_grp->nexthops[i];
3726 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003727 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003728 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003729 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003730 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003731 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3732 if (err)
3733 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003734 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3735 return nh_grp;
3736
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003737err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003738err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003739 for (i--; i >= 0; i--) {
3740 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003741 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003742 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003743 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003744 kfree(nh_grp);
3745 return ERR_PTR(err);
3746}
3747
3748static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003749mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3750 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003751{
3752 struct mlxsw_sp_nexthop *nh;
3753 int i;
3754
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003755 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003756 for (i = 0; i < nh_grp->count; i++) {
3757 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003758 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003759 }
Ido Schimmel58312122016-12-23 09:32:50 +01003760 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3761 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003762 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003763 kfree(nh_grp);
3764}
3765
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003766static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3767 struct mlxsw_sp_fib_entry *fib_entry,
3768 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003769{
3770 struct mlxsw_sp_nexthop_group *nh_grp;
3771
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003772 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003773 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003774 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003775 if (IS_ERR(nh_grp))
3776 return PTR_ERR(nh_grp);
3777 }
3778 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3779 fib_entry->nh_group = nh_grp;
3780 return 0;
3781}
3782
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003783static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3784 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003785{
3786 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3787
3788 list_del(&fib_entry->nexthop_group_node);
3789 if (!list_empty(&nh_grp->fib_list))
3790 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003791 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003792}
3793
Ido Schimmel013b20f2017-02-08 11:16:36 +01003794static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003795mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3796{
3797 struct mlxsw_sp_fib4_entry *fib4_entry;
3798
3799 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3800 common);
3801 return !fib4_entry->tos;
3802}
3803
3804static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003805mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3806{
3807 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3808
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003809 switch (fib_entry->fib_node->fib->proto) {
3810 case MLXSW_SP_L3_PROTO_IPV4:
3811 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3812 return false;
3813 break;
3814 case MLXSW_SP_L3_PROTO_IPV6:
3815 break;
3816 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003817
Ido Schimmel013b20f2017-02-08 11:16:36 +01003818 switch (fib_entry->type) {
3819 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3820 return !!nh_group->adj_index_valid;
3821 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003822 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003823 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3824 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003825 default:
3826 return false;
3827 }
3828}
3829
Ido Schimmel428b8512017-08-03 13:28:28 +02003830static struct mlxsw_sp_nexthop *
3831mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3832 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3833{
3834 int i;
3835
3836 for (i = 0; i < nh_grp->count; i++) {
3837 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
David Ahern8d1c8022018-04-17 17:33:26 -07003838 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003839
David Ahern5e670d82018-04-17 17:33:14 -07003840 if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
Ido Schimmel428b8512017-08-03 13:28:28 +02003841 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
David Ahern5e670d82018-04-17 17:33:14 -07003842 &rt->fib6_nh.nh_gw))
Ido Schimmel428b8512017-08-03 13:28:28 +02003843 return nh;
3844 continue;
3845 }
3846
3847 return NULL;
3848}
3849
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003850static void
3851mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3852{
3853 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3854 int i;
3855
Petr Machata4607f6d2017-09-02 23:49:25 +02003856 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3857 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003858 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3859 return;
3860 }
3861
3862 for (i = 0; i < nh_grp->count; i++) {
3863 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3864
3865 if (nh->offloaded)
3866 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3867 else
3868 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3869 }
3870}
3871
3872static void
3873mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3874{
3875 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3876 int i;
3877
Ido Schimmeld1c95af2018-02-17 00:30:44 +01003878 if (!list_is_singular(&nh_grp->fib_list))
3879 return;
3880
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003881 for (i = 0; i < nh_grp->count; i++) {
3882 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3883
3884 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3885 }
3886}
3887
Ido Schimmel428b8512017-08-03 13:28:28 +02003888static void
3889mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3890{
3891 struct mlxsw_sp_fib6_entry *fib6_entry;
3892 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3893
3894 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3895 common);
3896
3897 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3898 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
David Ahern5e670d82018-04-17 17:33:14 -07003899 list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003900 return;
3901 }
3902
3903 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3904 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3905 struct mlxsw_sp_nexthop *nh;
3906
3907 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3908 if (nh && nh->offloaded)
David Ahern5e670d82018-04-17 17:33:14 -07003909 mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003910 else
David Ahern5e670d82018-04-17 17:33:14 -07003911 mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003912 }
3913}
3914
3915static void
3916mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3917{
3918 struct mlxsw_sp_fib6_entry *fib6_entry;
3919 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3920
3921 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3922 common);
3923 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern8d1c8022018-04-17 17:33:26 -07003924 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003925
David Ahern5e670d82018-04-17 17:33:14 -07003926 rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003927 }
3928}
3929
Ido Schimmel013b20f2017-02-08 11:16:36 +01003930static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3931{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003932 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003933 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003934 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003935 break;
3936 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003937 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3938 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003939 }
3940}
3941
3942static void
3943mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3944{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003945 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003946 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003947 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003948 break;
3949 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003950 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3951 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003952 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003953}
3954
3955static void
3956mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3957 enum mlxsw_reg_ralue_op op, int err)
3958{
3959 switch (op) {
3960 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003961 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3962 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3963 if (err)
3964 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003965 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003966 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003967 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003968 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3969 return;
3970 default:
3971 return;
3972 }
3973}
3974
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003975static void
3976mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3977 const struct mlxsw_sp_fib_entry *fib_entry,
3978 enum mlxsw_reg_ralue_op op)
3979{
3980 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3981 enum mlxsw_reg_ralxx_protocol proto;
3982 u32 *p_dip;
3983
3984 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3985
3986 switch (fib->proto) {
3987 case MLXSW_SP_L3_PROTO_IPV4:
3988 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3989 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3990 fib_entry->fib_node->key.prefix_len,
3991 *p_dip);
3992 break;
3993 case MLXSW_SP_L3_PROTO_IPV6:
3994 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3995 fib_entry->fib_node->key.prefix_len,
3996 fib_entry->fib_node->key.addr);
3997 break;
3998 }
3999}
4000
4001static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4002 struct mlxsw_sp_fib_entry *fib_entry,
4003 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004004{
4005 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004006 enum mlxsw_reg_ralue_trap_action trap_action;
4007 u16 trap_id = 0;
4008 u32 adjacency_index = 0;
4009 u16 ecmp_size = 0;
4010
4011 /* In case the nexthop group adjacency index is valid, use it
4012 * with the provided ECMP size. Otherwise, set up a trap and pass
4013 * traffic to the kernel.
4014 */
Ido Schimmel4b411472017-02-08 11:16:37 +01004015 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004016 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4017 adjacency_index = fib_entry->nh_group->adj_index;
4018 ecmp_size = fib_entry->nh_group->ecmp_size;
4019 } else {
4020 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4021 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4022 }
4023
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004024 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004025 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
4026 adjacency_index, ecmp_size);
4027 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4028}
4029
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004030static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4031 struct mlxsw_sp_fib_entry *fib_entry,
4032 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004033{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004034 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004035 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004036 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01004037 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004038 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004039
4040 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4041 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004042 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004043 } else {
4044 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4045 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4046 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004047
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004048 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004049 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4050 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004051 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4052}
4053
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004054static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4055 struct mlxsw_sp_fib_entry *fib_entry,
4056 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004057{
4058 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02004059
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004060 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004061 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4062 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4063}
4064
Petr Machata4607f6d2017-09-02 23:49:25 +02004065static int
4066mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4067 struct mlxsw_sp_fib_entry *fib_entry,
4068 enum mlxsw_reg_ralue_op op)
4069{
4070 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4071 const struct mlxsw_sp_ipip_ops *ipip_ops;
4072
4073 if (WARN_ON(!ipip_entry))
4074 return -EINVAL;
4075
4076 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4077 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4078 fib_entry->decap.tunnel_index);
4079}
4080
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004081static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4082 struct mlxsw_sp_fib_entry *fib_entry,
4083 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004084{
4085 switch (fib_entry->type) {
4086 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004087 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004088 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004089 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004090 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004091 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02004092 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4093 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4094 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004095 }
4096 return -EINVAL;
4097}
4098
4099static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4100 struct mlxsw_sp_fib_entry *fib_entry,
4101 enum mlxsw_reg_ralue_op op)
4102{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004103 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004104
Ido Schimmel013b20f2017-02-08 11:16:36 +01004105 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004106
Ido Schimmel013b20f2017-02-08 11:16:36 +01004107 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004108}
4109
4110static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4111 struct mlxsw_sp_fib_entry *fib_entry)
4112{
Jiri Pirko7146da32016-09-01 10:37:41 +02004113 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4114 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004115}
4116
4117static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4118 struct mlxsw_sp_fib_entry *fib_entry)
4119{
4120 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4121 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4122}
4123
Jiri Pirko61c503f2016-07-04 08:23:11 +02004124static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004125mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4126 const struct fib_entry_notifier_info *fen_info,
4127 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004128{
Petr Machata4607f6d2017-09-02 23:49:25 +02004129 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4130 struct net_device *dev = fen_info->fi->fib_dev;
4131 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004132 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004133
Ido Schimmel97989ee2017-03-10 08:53:38 +01004134 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004135 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004136 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4137 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004138 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004139 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4140 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4141 fib_entry,
4142 ipip_entry);
4143 }
4144 /* fall through */
4145 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004146 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4147 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004148 case RTN_UNREACHABLE: /* fall through */
4149 case RTN_BLACKHOLE: /* fall through */
4150 case RTN_PROHIBIT:
4151 /* Packets hitting these routes need to be trapped, but
4152 * they can be trapped with a lower priority than packets directed
4153 * at the host, so use action type local instead of trap.
4154 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004155 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004156 return 0;
4157 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004158 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004159 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004160 else
4161 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004162 return 0;
4163 default:
4164 return -EINVAL;
4165 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004166}
4167
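/* Create the driver's representation of an IPv4 route: derive the entry
 * type from the route type, get (or create) a nexthop group for the
 * route's fib_info and record the keys (table ID, type, TOS, priority)
 * used to match later notifications against this entry.
 */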
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004168static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004169mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4170 struct mlxsw_sp_fib_node *fib_node,
4171 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004172{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004173 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004174 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004175 int err;
4176
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004177 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4178 if (!fib4_entry)
4179 return ERR_PTR(-ENOMEM);
4180 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004181
4182 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4183 if (err)
4184 goto err_fib4_entry_type_set;
4185
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004186 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004187 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004188 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004189
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004190 fib4_entry->prio = fen_info->fi->fib_priority;
4191 fib4_entry->tb_id = fen_info->tb_id;
4192 fib4_entry->type = fen_info->type;
4193 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004194
4195 fib_entry->fib_node = fib_node;
4196
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004197 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004198
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004199err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004200err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004201 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004202 return ERR_PTR(err);
4203}
4204
4205static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004206 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004207{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004208 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004209 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004210}
4211
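/* Find the offloaded entry that corresponds to an IPv4 FIB notification:
 * resolve the virtual router from the table ID, look up the FIB node by
 * destination prefix and match the entry by table ID, TOS, route type
 * and fib_info.
 */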
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004212static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004213mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4214 const struct fib_entry_notifier_info *fen_info)
4215{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004216 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004217 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004218 struct mlxsw_sp_fib *fib;
4219 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004220
Ido Schimmel160e22a2017-07-18 10:10:20 +02004221 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4222 if (!vr)
4223 return NULL;
4224 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4225
4226 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4227 sizeof(fen_info->dst),
4228 fen_info->dst_len);
4229 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004230 return NULL;
4231
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004232 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4233 if (fib4_entry->tb_id == fen_info->tb_id &&
4234 fib4_entry->tos == fen_info->tos &&
4235 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004236 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4237 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004238 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004239 }
4240 }
4241
4242 return NULL;
4243}
4244
4245static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4246 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4247 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4248 .key_len = sizeof(struct mlxsw_sp_fib_key),
4249 .automatic_shrinking = true,
4250};
4251
4252static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4253 struct mlxsw_sp_fib_node *fib_node)
4254{
4255 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4256 mlxsw_sp_fib_ht_params);
4257}
4258
4259static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4260 struct mlxsw_sp_fib_node *fib_node)
4261{
4262 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4263 mlxsw_sp_fib_ht_params);
4264}
4265
4266static struct mlxsw_sp_fib_node *
4267mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4268 size_t addr_len, unsigned char prefix_len)
4269{
4270 struct mlxsw_sp_fib_key key;
4271
4272 memset(&key, 0, sizeof(key));
4273 memcpy(key.addr, addr, addr_len);
4274 key.prefix_len = prefix_len;
4275 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4276}
4277
4278static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004279mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004280 size_t addr_len, unsigned char prefix_len)
4281{
4282 struct mlxsw_sp_fib_node *fib_node;
4283
4284 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4285 if (!fib_node)
4286 return NULL;
4287
4288 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004289 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004290 memcpy(fib_node->key.addr, addr, addr_len);
4291 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004292
4293 return fib_node;
4294}
4295
4296static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4297{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004298 list_del(&fib_node->list);
4299 WARN_ON(!list_empty(&fib_node->entry_list));
4300 kfree(fib_node);
4301}
4302
4303static bool
4304mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4305 const struct mlxsw_sp_fib_entry *fib_entry)
4306{
4307 return list_first_entry(&fib_node->entry_list,
4308 struct mlxsw_sp_fib_entry, list) == fib_entry;
4309}
4310
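/* The LPM tree bound to a FIB must cover every prefix length in use by
 * its nodes. When a node with a new prefix length is linked, the tree's
 * reference count for that length is bumped if the length is already
 * covered; otherwise a tree matching the expanded prefix usage is fetched
 * and the virtual routers are moved over to it.
 */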
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004311static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004312 struct mlxsw_sp_fib_node *fib_node)
4313{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004314 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004315 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004316 struct mlxsw_sp_lpm_tree *lpm_tree;
4317 int err;
4318
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004319 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4320 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4321 goto out;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004322
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004323 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4324 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004325 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4326 fib->proto);
4327 if (IS_ERR(lpm_tree))
4328 return PTR_ERR(lpm_tree);
4329
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004330 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4331 if (err)
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004332 goto err_lpm_tree_replace;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004333
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004334out:
4335 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004336 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004337
4338err_lpm_tree_replace:
4339 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4340 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004341}
4342
4343static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004344 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004345{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004346 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4347 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004348 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004349 int err;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004350
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004351 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004352 return;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004353 /* Try to construct a new LPM tree from the current prefix usage
4354 * minus the now-unused prefix length. If that fails, keep the old tree.
Ido Schimmel4fd00312018-01-22 09:17:40 +01004355 */
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004356 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4357 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4358 fib_node->key.prefix_len);
4359 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4360 fib->proto);
4361 if (IS_ERR(lpm_tree))
4362 return;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004363
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004364 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4365 if (err)
4366 goto err_lpm_tree_replace;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004367
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004368 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004369
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004370err_lpm_tree_replace:
4371 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004372}
4373
Ido Schimmel76610eb2017-03-10 08:53:41 +01004374static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4375 struct mlxsw_sp_fib_node *fib_node,
4376 struct mlxsw_sp_fib *fib)
4377{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004378 int err;
4379
4380 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4381 if (err)
4382 return err;
4383 fib_node->fib = fib;
4384
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004385 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004386 if (err)
4387 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004388
Ido Schimmel76610eb2017-03-10 08:53:41 +01004389 return 0;
4390
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004391err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004392 fib_node->fib = NULL;
4393 mlxsw_sp_fib_node_remove(fib, fib_node);
4394 return err;
4395}
4396
4397static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4398 struct mlxsw_sp_fib_node *fib_node)
4399{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004400 struct mlxsw_sp_fib *fib = fib_node->fib;
4401
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004402 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004403 fib_node->fib = NULL;
4404 mlxsw_sp_fib_node_remove(fib, fib_node);
4405}
4406
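/* Get the FIB node for the given prefix, creating it if needed: resolve
 * the virtual router from the table ID, pick its FIB for the requested
 * protocol and, for a new node, insert it into the FIB's hashtable and
 * link the FIB to an LPM tree that covers the node's prefix length.
 */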
Ido Schimmel9aecce12017-02-09 10:28:42 +01004407static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004408mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4409 size_t addr_len, unsigned char prefix_len,
4410 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004411{
4412 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004413 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004414 struct mlxsw_sp_vr *vr;
4415 int err;
4416
David Ahernf8fa9b42017-10-18 09:56:56 -07004417 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004418 if (IS_ERR(vr))
4419 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004420 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004421
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004422 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004423 if (fib_node)
4424 return fib_node;
4425
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004426 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004427 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004428 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004429 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004430 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004431
Ido Schimmel76610eb2017-03-10 08:53:41 +01004432 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4433 if (err)
4434 goto err_fib_node_init;
4435
Ido Schimmel9aecce12017-02-09 10:28:42 +01004436 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004437
Ido Schimmel76610eb2017-03-10 08:53:41 +01004438err_fib_node_init:
4439 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004440err_fib_node_create:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004441 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004442 return ERR_PTR(err);
4443}
4444
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004445static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4446 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004447{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004448 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004449
Ido Schimmel9aecce12017-02-09 10:28:42 +01004450 if (!list_empty(&fib_node->entry_list))
4451 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004452 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004453 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004454 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004455}
4456
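/* Entries hanging off a FIB node are kept sorted by table ID (descending),
 * then TOS (descending), then priority (ascending), and only the first
 * entry in the list is programmed to the device. For example, a node
 * could hold {tb_id 255, tos 0, prio 0}, {tb_id 254, tos 8, prio 10},
 * {tb_id 254, tos 0, prio 20}, in that order.
 */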
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004457static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004458mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004459 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004460{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004461 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004462
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004463 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4464 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004465 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004466 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004467 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004468 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004469 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004470 if (fib4_entry->prio >= new4_entry->prio ||
4471 fib4_entry->tos < new4_entry->tos)
4472 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004473 }
4474
4475 return NULL;
4476}
4477
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004478static int
4479mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4480 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004481{
4482 struct mlxsw_sp_fib_node *fib_node;
4483
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004484 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004485 return -EINVAL;
4486
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004487 fib_node = fib4_entry->common.fib_node;
4488 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4489 common.list) {
4490 if (fib4_entry->tb_id != new4_entry->tb_id ||
4491 fib4_entry->tos != new4_entry->tos ||
4492 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004493 break;
4494 }
4495
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004496 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004497 return 0;
4498}
4499
Ido Schimmel9aecce12017-02-09 10:28:42 +01004500static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004501mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004502 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004503{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004504 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004505 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004506
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004507 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004508
Ido Schimmel4283bce2017-02-09 10:28:43 +01004509 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004510 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4511 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004512 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004513
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004514 /* Insert the new entry before the replaced one, so that we can
4515 * later remove the replaced entry.
4516 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004517 if (fib4_entry) {
4518 list_add_tail(&new4_entry->common.list,
4519 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004520 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004521 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004522
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004523 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4524 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004525 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004526 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004527 }
4528
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004529 if (fib4_entry)
4530 list_add(&new4_entry->common.list,
4531 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004532 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004533 list_add(&new4_entry->common.list,
4534 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004535 }
4536
4537 return 0;
4538}
4539
4540static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004541mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004542{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004543 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004544}
4545
Ido Schimmel80c238f2017-07-18 10:10:29 +02004546static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4547 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004548{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004549 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4550
Ido Schimmel9aecce12017-02-09 10:28:42 +01004551 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4552 return 0;
4553
4554 /* To prevent packet loss, overwrite the previously offloaded
4555 * entry.
4556 */
4557 if (!list_is_singular(&fib_node->entry_list)) {
4558 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4559 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4560
4561 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4562 }
4563
4564 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4565}
4566
Ido Schimmel80c238f2017-07-18 10:10:29 +02004567static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4568 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004569{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004570 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4571
Ido Schimmel9aecce12017-02-09 10:28:42 +01004572 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4573 return;
4574
4575 /* Promote the next entry by overwriting the deleted entry */
4576 if (!list_is_singular(&fib_node->entry_list)) {
4577 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4578 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4579
4580 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4581 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4582 return;
4583 }
4584
4585 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4586}
4587
4588static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004589 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004590 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004591{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004592 int err;
4593
Ido Schimmel9efbee62017-07-18 10:10:28 +02004594 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004595 if (err)
4596 return err;
4597
Ido Schimmel80c238f2017-07-18 10:10:29 +02004598 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004599 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004600 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004601
Ido Schimmel9aecce12017-02-09 10:28:42 +01004602 return 0;
4603
Ido Schimmel80c238f2017-07-18 10:10:29 +02004604err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004605 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004606 return err;
4607}
4608
4609static void
4610mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004611 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004612{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004613 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004614 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004615
4616 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4617 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004618}
4619
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004620static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004621 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004622 bool replace)
4623{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004624 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4625 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004626
4627 if (!replace)
4628 return;
4629
4630 /* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004631 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004632
4633 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4634 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004635 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004636}
4637
Ido Schimmel9aecce12017-02-09 10:28:42 +01004638static int
4639mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004640 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004641 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004642{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004643 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004644 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004645 int err;
4646
Ido Schimmel9011b672017-05-16 19:38:25 +02004647 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004648 return 0;
4649
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004650 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4651 &fen_info->dst, sizeof(fen_info->dst),
4652 fen_info->dst_len,
4653 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004654 if (IS_ERR(fib_node)) {
4655 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4656 return PTR_ERR(fib_node);
4657 }
4658
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004659 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4660 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004661 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004662 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004663 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004664 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004665
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004666 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004667 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004668 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004669 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4670 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004671 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004672
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004673 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004674
Jiri Pirko61c503f2016-07-04 08:23:11 +02004675 return 0;
4676
Ido Schimmel9aecce12017-02-09 10:28:42 +01004677err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004678 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004679err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004680 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004681 return err;
4682}
4683
Jiri Pirko37956d72016-10-20 16:05:43 +02004684static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4685 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004686{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004687 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004688 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004689
Ido Schimmel9011b672017-05-16 19:38:25 +02004690 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004691 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004692
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004693 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4694 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004695 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004696 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004697
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004698 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4699 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004700 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004701}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004702
David Ahern8d1c8022018-04-17 17:33:26 -07004703static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004704{
4705 /* Packets with a link-local destination IP arriving at the router
4706 * are trapped to the CPU, so there is no need to program specific
4707 * routes for them.
4708 */
David Ahern93c2fb22018-04-18 15:38:59 -07004709 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
Ido Schimmel428b8512017-08-03 13:28:28 +02004710 return true;
4711
4712 /* Multicast routes aren't supported, so ignore them. Neighbour
4713 * Discovery packets are specifically trapped.
4714 */
David Ahern93c2fb22018-04-18 15:38:59 -07004715 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
Ido Schimmel428b8512017-08-03 13:28:28 +02004716 return true;
4717
4718 /* Cloned routes are irrelevant in the forwarding path. */
David Ahern93c2fb22018-04-18 15:38:59 -07004719 if (rt->fib6_flags & RTF_CACHE)
Ido Schimmel428b8512017-08-03 13:28:28 +02004720 return true;
4721
4722 return false;
4723}
4724
David Ahern8d1c8022018-04-17 17:33:26 -07004725static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004726{
4727 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4728
4729 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4730 if (!mlxsw_sp_rt6)
4731 return ERR_PTR(-ENOMEM);
4732
4733 /* In case of route replace, the replaced route is deleted with
4734 * no notification. Take a reference to prevent accessing freed
4735 * memory.
4736 */
4737 mlxsw_sp_rt6->rt = rt;
David Ahern8d1c8022018-04-17 17:33:26 -07004738 fib6_info_hold(rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004739
4740 return mlxsw_sp_rt6;
4741}
4742
4743#if IS_ENABLED(CONFIG_IPV6)
David Ahern8d1c8022018-04-17 17:33:26 -07004744static void mlxsw_sp_rt6_release(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004745{
David Ahern8d1c8022018-04-17 17:33:26 -07004746 fib6_info_release(rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004747}
4748#else
David Ahern8d1c8022018-04-17 17:33:26 -07004749static void mlxsw_sp_rt6_release(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004750{
4751}
4752#endif
4753
4754static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4755{
4756 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4757 kfree(mlxsw_sp_rt6);
4758}
4759
David Ahern8d1c8022018-04-17 17:33:26 -07004760static struct fib6_info *
Ido Schimmel428b8512017-08-03 13:28:28 +02004761mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4762{
4763 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4764 list)->rt;
4765}
4766
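/* For an append notification, look for an existing IPv6 entry in the same
 * table and with the same metric as the appended route; the route is then
 * added to that entry as an additional nexthop instead of creating a new
 * entry.
 */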
4767static struct mlxsw_sp_fib6_entry *
4768mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel53b562d2018-06-15 16:23:36 +03004769 const struct fib6_info *nrt, bool append)
Ido Schimmel428b8512017-08-03 13:28:28 +02004770{
4771 struct mlxsw_sp_fib6_entry *fib6_entry;
4772
Ido Schimmel53b562d2018-06-15 16:23:36 +03004773 if (!append)
Ido Schimmel428b8512017-08-03 13:28:28 +02004774 return NULL;
4775
4776 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07004777 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02004778
4779 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4780 * virtual router.
4781 */
David Ahern93c2fb22018-04-18 15:38:59 -07004782 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02004783 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07004784 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02004785 break;
David Ahern93c2fb22018-04-18 15:38:59 -07004786 if (rt->fib6_metric < nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004787 continue;
Ido Schimmel53b562d2018-06-15 16:23:36 +03004788 if (rt->fib6_metric == nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004789 return fib6_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07004790 if (rt->fib6_metric > nrt->fib6_metric)
Ido Schimmel428b8512017-08-03 13:28:28 +02004791 break;
4792 }
4793
4794 return NULL;
4795}
4796
4797static struct mlxsw_sp_rt6 *
4798mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07004799 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004800{
4801 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4802
4803 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4804 if (mlxsw_sp_rt6->rt == rt)
4805 return mlxsw_sp_rt6;
4806 }
4807
4808 return NULL;
4809}
4810
Petr Machata8f28a302017-09-02 23:49:24 +02004811static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07004812 const struct fib6_info *rt,
Petr Machata8f28a302017-09-02 23:49:24 +02004813 enum mlxsw_sp_ipip_type *ret)
4814{
David Ahern5e670d82018-04-17 17:33:14 -07004815 return rt->fib6_nh.nh_dev &&
4816 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
Petr Machata8f28a302017-09-02 23:49:24 +02004817}
4818
Petr Machata35225e42017-09-02 23:49:22 +02004819static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4820 struct mlxsw_sp_nexthop_group *nh_grp,
4821 struct mlxsw_sp_nexthop *nh,
David Ahern8d1c8022018-04-17 17:33:26 -07004822 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004823{
Petr Machatad97cda52017-11-28 13:17:13 +01004824 const struct mlxsw_sp_ipip_ops *ipip_ops;
4825 struct mlxsw_sp_ipip_entry *ipip_entry;
David Ahern5e670d82018-04-17 17:33:14 -07004826 struct net_device *dev = rt->fib6_nh.nh_dev;
Ido Schimmel428b8512017-08-03 13:28:28 +02004827 struct mlxsw_sp_rif *rif;
4828 int err;
4829
Petr Machatad97cda52017-11-28 13:17:13 +01004830 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4831 if (ipip_entry) {
4832 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4833 if (ipip_ops->can_offload(mlxsw_sp, dev,
4834 MLXSW_SP_L3_PROTO_IPV6)) {
4835 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4836 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4837 return 0;
4838 }
Petr Machata8f28a302017-09-02 23:49:24 +02004839 }
4840
Petr Machata35225e42017-09-02 23:49:22 +02004841 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004842 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4843 if (!rif)
4844 return 0;
4845 mlxsw_sp_nexthop_rif_init(nh, rif);
4846
4847 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4848 if (err)
4849 goto err_nexthop_neigh_init;
4850
4851 return 0;
4852
4853err_nexthop_neigh_init:
4854 mlxsw_sp_nexthop_rif_fini(nh);
4855 return err;
4856}
4857
Petr Machata35225e42017-09-02 23:49:22 +02004858static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4859 struct mlxsw_sp_nexthop *nh)
4860{
4861 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4862}
4863
4864static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4865 struct mlxsw_sp_nexthop_group *nh_grp,
4866 struct mlxsw_sp_nexthop *nh,
David Ahern8d1c8022018-04-17 17:33:26 -07004867 const struct fib6_info *rt)
Petr Machata35225e42017-09-02 23:49:22 +02004868{
David Ahern5e670d82018-04-17 17:33:14 -07004869 struct net_device *dev = rt->fib6_nh.nh_dev;
Petr Machata35225e42017-09-02 23:49:22 +02004870
4871 nh->nh_grp = nh_grp;
David Ahern5e670d82018-04-17 17:33:14 -07004872 nh->nh_weight = rt->fib6_nh.nh_weight;
4873 memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004874 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004875
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004876 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4877
Petr Machata35225e42017-09-02 23:49:22 +02004878 if (!dev)
4879 return 0;
4880 nh->ifindex = dev->ifindex;
4881
4882 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4883}
4884
Ido Schimmel428b8512017-08-03 13:28:28 +02004885static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4886 struct mlxsw_sp_nexthop *nh)
4887{
Petr Machata35225e42017-09-02 23:49:22 +02004888 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004889 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004890 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004891}
4892
Petr Machataf6050ee2017-09-02 23:49:21 +02004893static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07004894 const struct fib6_info *rt)
Petr Machataf6050ee2017-09-02 23:49:21 +02004895{
David Ahern93c2fb22018-04-18 15:38:59 -07004896 return rt->fib6_flags & RTF_GATEWAY ||
Petr Machata8f28a302017-09-02 23:49:24 +02004897 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004898}
4899
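/* An IPv6 nexthop group is created with one nexthop per route in the
 * entry's rt6 list. Groups are kept in a hashtable so that entries with
 * an identical set of nexthops share a single group and thus a single
 * adjacency group in the device.
 */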
Ido Schimmel428b8512017-08-03 13:28:28 +02004900static struct mlxsw_sp_nexthop_group *
4901mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4902 struct mlxsw_sp_fib6_entry *fib6_entry)
4903{
4904 struct mlxsw_sp_nexthop_group *nh_grp;
4905 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4906 struct mlxsw_sp_nexthop *nh;
4907 size_t alloc_size;
4908 int i = 0;
4909 int err;
4910
4911 alloc_size = sizeof(*nh_grp) +
4912 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4913 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4914 if (!nh_grp)
4915 return ERR_PTR(-ENOMEM);
4916 INIT_LIST_HEAD(&nh_grp->fib_list);
4917#if IS_ENABLED(CONFIG_IPV6)
4918 nh_grp->neigh_tbl = &nd_tbl;
4919#endif
4920 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4921 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004922 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004923 nh_grp->count = fib6_entry->nrt6;
4924 for (i = 0; i < nh_grp->count; i++) {
David Ahern8d1c8022018-04-17 17:33:26 -07004925 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004926
4927 nh = &nh_grp->nexthops[i];
4928 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4929 if (err)
4930 goto err_nexthop6_init;
4931 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4932 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004933
4934 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4935 if (err)
4936 goto err_nexthop_group_insert;
4937
Ido Schimmel428b8512017-08-03 13:28:28 +02004938 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4939 return nh_grp;
4940
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004941err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004942err_nexthop6_init:
4943 for (i--; i >= 0; i--) {
4944 nh = &nh_grp->nexthops[i];
4945 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4946 }
4947 kfree(nh_grp);
4948 return ERR_PTR(err);
4949}
4950
4951static void
4952mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4953 struct mlxsw_sp_nexthop_group *nh_grp)
4954{
4955 struct mlxsw_sp_nexthop *nh;
4956 int i = nh_grp->count;
4957
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004958 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004959 for (i--; i >= 0; i--) {
4960 nh = &nh_grp->nexthops[i];
4961 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4962 }
4963 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4964 WARN_ON(nh_grp->adj_index_valid);
4965 kfree(nh_grp);
4966}
4967
4968static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4969 struct mlxsw_sp_fib6_entry *fib6_entry)
4970{
4971 struct mlxsw_sp_nexthop_group *nh_grp;
4972
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004973 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4974 if (!nh_grp) {
4975 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4976 if (IS_ERR(nh_grp))
4977 return PTR_ERR(nh_grp);
4978 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004979
4980 list_add_tail(&fib6_entry->common.nexthop_group_node,
4981 &nh_grp->fib_list);
4982 fib6_entry->common.nh_group = nh_grp;
4983
4984 return 0;
4985}
4986
4987static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4988 struct mlxsw_sp_fib_entry *fib_entry)
4989{
4990 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4991
4992 list_del(&fib_entry->nexthop_group_node);
4993 if (!list_empty(&nh_grp->fib_list))
4994 return;
4995 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4996}
4997
4998static int
4999mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5000 struct mlxsw_sp_fib6_entry *fib6_entry)
5001{
5002 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5003 int err;
5004
5005 fib6_entry->common.nh_group = NULL;
5006 list_del(&fib6_entry->common.nexthop_group_node);
5007
5008 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5009 if (err)
5010 goto err_nexthop6_group_get;
5011
5012 /* If this entry is offloaded, then the adjacency index
5013 * currently associated with it in the device's table is that
5014 * of the old group. Start using the new one instead.
5015 */
5016 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5017 if (err)
5018 goto err_fib_node_entry_add;
5019
5020 if (list_empty(&old_nh_grp->fib_list))
5021 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5022
5023 return 0;
5024
5025err_fib_node_entry_add:
5026 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5027err_nexthop6_group_get:
5028 list_add_tail(&fib6_entry->common.nexthop_group_node,
5029 &old_nh_grp->fib_list);
5030 fib6_entry->common.nh_group = old_nh_grp;
5031 return err;
5032}
5033
5034static int
5035mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5036 struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005037 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005038{
5039 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5040 int err;
5041
5042 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5043 if (IS_ERR(mlxsw_sp_rt6))
5044 return PTR_ERR(mlxsw_sp_rt6);
5045
5046 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5047 fib6_entry->nrt6++;
5048
5049 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5050 if (err)
5051 goto err_nexthop6_group_update;
5052
5053 return 0;
5054
5055err_nexthop6_group_update:
5056 fib6_entry->nrt6--;
5057 list_del(&mlxsw_sp_rt6->list);
5058 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5059 return err;
5060}
5061
5062static void
5063mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5064 struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005065 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005066{
5067 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5068
5069 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5070 if (WARN_ON(!mlxsw_sp_rt6))
5071 return;
5072
5073 fib6_entry->nrt6--;
5074 list_del(&mlxsw_sp_rt6->list);
5075 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5076 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5077}
5078
Petr Machataf6050ee2017-09-02 23:49:21 +02005079static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5080 struct mlxsw_sp_fib_entry *fib_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07005081 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005082{
5083 /* Packets hitting RTF_REJECT routes need to be discarded by the
5084 * stack. We can rely on their destination device not having a
5085 * RIF (it's the loopback device) and can thus use action type
5086 * local, which will cause them to be trapped with a lower
5087 * priority than packets that need to be locally received.
5088 */
David Ahern93c2fb22018-04-18 15:38:59 -07005089 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02005090 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
David Ahern93c2fb22018-04-18 15:38:59 -07005091 else if (rt->fib6_flags & RTF_REJECT)
Ido Schimmel428b8512017-08-03 13:28:28 +02005092 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02005093 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02005094 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5095 else
5096 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5097}
5098
5099static void
5100mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5101{
5102 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5103
5104 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5105 list) {
5106 fib6_entry->nrt6--;
5107 list_del(&mlxsw_sp_rt6->list);
5108 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5109 }
5110}
5111
5112static struct mlxsw_sp_fib6_entry *
5113mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5114 struct mlxsw_sp_fib_node *fib_node,
David Ahern8d1c8022018-04-17 17:33:26 -07005115 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005116{
5117 struct mlxsw_sp_fib6_entry *fib6_entry;
5118 struct mlxsw_sp_fib_entry *fib_entry;
5119 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5120 int err;
5121
5122 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5123 if (!fib6_entry)
5124 return ERR_PTR(-ENOMEM);
5125 fib_entry = &fib6_entry->common;
5126
5127 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5128 if (IS_ERR(mlxsw_sp_rt6)) {
5129 err = PTR_ERR(mlxsw_sp_rt6);
5130 goto err_rt6_create;
5131 }
5132
Petr Machataf6050ee2017-09-02 23:49:21 +02005133 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005134
5135 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5136 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5137 fib6_entry->nrt6 = 1;
5138 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5139 if (err)
5140 goto err_nexthop6_group_get;
5141
5142 fib_entry->fib_node = fib_node;
5143
5144 return fib6_entry;
5145
5146err_nexthop6_group_get:
5147 list_del(&mlxsw_sp_rt6->list);
5148 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5149err_rt6_create:
5150 kfree(fib6_entry);
5151 return ERR_PTR(err);
5152}
5153
5154static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5155 struct mlxsw_sp_fib6_entry *fib6_entry)
5156{
5157 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5158 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5159 WARN_ON(fib6_entry->nrt6);
5160 kfree(fib6_entry);
5161}
5162
5163static struct mlxsw_sp_fib6_entry *
5164mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
David Ahern8d1c8022018-04-17 17:33:26 -07005165 const struct fib6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005166{
Ido Schimmelce45bded2018-06-15 16:23:37 +03005167 struct mlxsw_sp_fib6_entry *fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005168
5169 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005170 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005171
David Ahern93c2fb22018-04-18 15:38:59 -07005172 if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005173 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07005174 if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005175 break;
Ido Schimmelce45bded2018-06-15 16:23:37 +03005176 if (replace && rt->fib6_metric == nrt->fib6_metric)
5177 return fib6_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07005178 if (rt->fib6_metric > nrt->fib6_metric)
Ido Schimmelce45bded2018-06-15 16:23:37 +03005179 return fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005180 }
5181
Ido Schimmelce45bded2018-06-15 16:23:37 +03005182 return NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005183}
5184
5185static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005186mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5187 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005188{
5189 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
David Ahern8d1c8022018-04-17 17:33:26 -07005190 struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005191 struct mlxsw_sp_fib6_entry *fib6_entry;
5192
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005193 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5194
5195 if (replace && WARN_ON(!fib6_entry))
5196 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005197
5198 if (fib6_entry) {
5199 list_add_tail(&new6_entry->common.list,
5200 &fib6_entry->common.list);
5201 } else {
5202 struct mlxsw_sp_fib6_entry *last;
5203
5204 list_for_each_entry(last, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005205 struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);
Ido Schimmel428b8512017-08-03 13:28:28 +02005206
David Ahern93c2fb22018-04-18 15:38:59 -07005207 if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
Ido Schimmel428b8512017-08-03 13:28:28 +02005208 break;
5209 fib6_entry = last;
5210 }
5211
5212 if (fib6_entry)
5213 list_add(&new6_entry->common.list,
5214 &fib6_entry->common.list);
5215 else
5216 list_add(&new6_entry->common.list,
5217 &fib_node->entry_list);
5218 }
5219
5220 return 0;
5221}
5222
5223static void
5224mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5225{
5226 list_del(&fib6_entry->common.list);
5227}
5228
5229static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005230 struct mlxsw_sp_fib6_entry *fib6_entry,
5231 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005232{
5233 int err;
5234
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005235 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005236 if (err)
5237 return err;
5238
5239 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5240 if (err)
5241 goto err_fib_node_entry_add;
5242
5243 return 0;
5244
5245err_fib_node_entry_add:
5246 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5247 return err;
5248}
5249
5250static void
5251mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5252 struct mlxsw_sp_fib6_entry *fib6_entry)
5253{
5254 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5255 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5256}
5257
5258static struct mlxsw_sp_fib6_entry *
5259mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07005260 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005261{
5262 struct mlxsw_sp_fib6_entry *fib6_entry;
5263 struct mlxsw_sp_fib_node *fib_node;
5264 struct mlxsw_sp_fib *fib;
5265 struct mlxsw_sp_vr *vr;
5266
David Ahern93c2fb22018-04-18 15:38:59 -07005267 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
Ido Schimmel428b8512017-08-03 13:28:28 +02005268 if (!vr)
5269 return NULL;
5270 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5271
David Ahern93c2fb22018-04-18 15:38:59 -07005272 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5273 sizeof(rt->fib6_dst.addr),
5274 rt->fib6_dst.plen);
Ido Schimmel428b8512017-08-03 13:28:28 +02005275 if (!fib_node)
5276 return NULL;
5277
5278 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005279 struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005280
David Ahern93c2fb22018-04-18 15:38:59 -07005281 if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5282 rt->fib6_metric == iter_rt->fib6_metric &&
Ido Schimmel428b8512017-08-03 13:28:28 +02005283 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5284 return fib6_entry;
5285 }
5286
5287 return NULL;
5288}
5289
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005290static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5291 struct mlxsw_sp_fib6_entry *fib6_entry,
5292 bool replace)
5293{
5294 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5295 struct mlxsw_sp_fib6_entry *replaced;
5296
5297 if (!replace)
5298 return;
5299
5300 replaced = list_next_entry(fib6_entry, common.list);
5301
5302 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5303 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5304 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5305}
5306
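/* Add an IPv6 route. An appended route only adds a nexthop to an existing
 * multipath entry. Otherwise a new entry is created and, in the replace
 * case, inserted before the replaced entry, which is then unlinked and
 * destroyed.
 */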
Ido Schimmel428b8512017-08-03 13:28:28 +02005307static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel53b562d2018-06-15 16:23:36 +03005308 struct fib6_info *rt, bool replace,
5309 bool append)
Ido Schimmel428b8512017-08-03 13:28:28 +02005310{
5311 struct mlxsw_sp_fib6_entry *fib6_entry;
5312 struct mlxsw_sp_fib_node *fib_node;
5313 int err;
5314
5315 if (mlxsw_sp->router->aborted)
5316 return 0;
5317
David Ahern93c2fb22018-04-18 15:38:59 -07005318 if (rt->fib6_src.plen)
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005319 return -EINVAL;
5320
Ido Schimmel428b8512017-08-03 13:28:28 +02005321 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5322 return 0;
5323
David Ahern93c2fb22018-04-18 15:38:59 -07005324 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5325 &rt->fib6_dst.addr,
5326 sizeof(rt->fib6_dst.addr),
5327 rt->fib6_dst.plen,
Ido Schimmel428b8512017-08-03 13:28:28 +02005328 MLXSW_SP_L3_PROTO_IPV6);
5329 if (IS_ERR(fib_node))
5330 return PTR_ERR(fib_node);
5331
5332	/* Before creating a new entry, try to append the route to an
5333	 * existing multipath entry.
5334	 */
Ido Schimmel53b562d2018-06-15 16:23:36 +03005335 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
Ido Schimmel428b8512017-08-03 13:28:28 +02005336 if (fib6_entry) {
5337 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5338 if (err)
5339 goto err_fib6_entry_nexthop_add;
5340 return 0;
5341 }
5342
Ido Schimmel53b562d2018-06-15 16:23:36 +03005343 /* We received an append event, yet did not find any route to
5344 * append to.
5345 */
5346 if (WARN_ON(append)) {
5347 err = -EINVAL;
5348 goto err_fib6_entry_append;
5349 }
5350
Ido Schimmel428b8512017-08-03 13:28:28 +02005351 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5352 if (IS_ERR(fib6_entry)) {
5353 err = PTR_ERR(fib6_entry);
5354 goto err_fib6_entry_create;
5355 }
5356
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005357 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005358 if (err)
5359 goto err_fib6_node_entry_link;
5360
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005361 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5362
Ido Schimmel428b8512017-08-03 13:28:28 +02005363 return 0;
5364
5365err_fib6_node_entry_link:
5366 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5367err_fib6_entry_create:
Ido Schimmel53b562d2018-06-15 16:23:36 +03005368err_fib6_entry_append:
Ido Schimmel428b8512017-08-03 13:28:28 +02005369err_fib6_entry_nexthop_add:
5370 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5371 return err;
5372}
5373
5374static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07005375 struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005376{
5377 struct mlxsw_sp_fib6_entry *fib6_entry;
5378 struct mlxsw_sp_fib_node *fib_node;
5379
5380 if (mlxsw_sp->router->aborted)
5381 return;
5382
5383 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5384 return;
5385
5386 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5387 if (WARN_ON(!fib6_entry))
5388 return;
5389
5390	/* If the route is part of a multipath entry, but not the last one
5391	 * removed, then only reduce its nexthop group.
5392	 */
5393 if (!list_is_singular(&fib6_entry->rt6_list)) {
5394 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5395 return;
5396 }
5397
5398 fib_node = fib6_entry->common.fib_node;
5399
5400 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5401 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5402 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5403}
5404
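/* Program a minimal LPM tree for the given protocol and install in every
 * virtual router a default route whose action is to trap packets to the
 * CPU. Used after FIB offload is aborted.
 */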
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005405static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5406 enum mlxsw_reg_ralxx_protocol proto,
5407 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005408{
5409 char ralta_pl[MLXSW_REG_RALTA_LEN];
5410 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005411 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005412
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005413 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005414 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5415 if (err)
5416 return err;
5417
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005418 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005419 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5420 if (err)
5421 return err;
5422
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005423 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005424 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005425 char raltb_pl[MLXSW_REG_RALTB_LEN];
5426 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005427
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005428 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005429 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5430 raltb_pl);
5431 if (err)
5432 return err;
5433
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005434 mlxsw_reg_ralue_pack(ralue_pl, proto,
5435 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005436 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5437 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5438 ralue_pl);
5439 if (err)
5440 return err;
5441 }
5442
5443 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005444}
5445
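/* Select the IPv4 or IPv6 multicast routing table of the virtual router
 * according to the notifier family.
 */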
Yuval Mintzeb35da02018-03-26 15:01:42 +03005446static struct mlxsw_sp_mr_table *
5447mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5448{
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005449 if (family == RTNL_FAMILY_IPMR)
Yuval Mintzeb35da02018-03-26 15:01:42 +03005450 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005451 else
5452 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
Yuval Mintzeb35da02018-03-26 15:01:42 +03005453}
5454
Yotam Gigid42b0962017-09-27 08:23:20 +02005455static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5456 struct mfc_entry_notifier_info *men_info,
5457 bool replace)
5458{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005459 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005460 struct mlxsw_sp_vr *vr;
5461
5462 if (mlxsw_sp->router->aborted)
5463 return 0;
5464
David Ahernf8fa9b42017-10-18 09:56:56 -07005465 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005466 if (IS_ERR(vr))
5467 return PTR_ERR(vr);
5468
Yuval Mintzeb35da02018-03-26 15:01:42 +03005469 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5470 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
Yotam Gigid42b0962017-09-27 08:23:20 +02005471}
5472
5473static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5474 struct mfc_entry_notifier_info *men_info)
5475{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005476 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005477 struct mlxsw_sp_vr *vr;
5478
5479 if (mlxsw_sp->router->aborted)
5480 return;
5481
5482 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5483 if (WARN_ON(!vr))
5484 return;
5485
Yuval Mintzeb35da02018-03-26 15:01:42 +03005486 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5487 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005488 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005489}
5490
5491static int
5492mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5493 struct vif_entry_notifier_info *ven_info)
5494{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005495 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005496 struct mlxsw_sp_rif *rif;
5497 struct mlxsw_sp_vr *vr;
5498
5499 if (mlxsw_sp->router->aborted)
5500 return 0;
5501
David Ahernf8fa9b42017-10-18 09:56:56 -07005502 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005503 if (IS_ERR(vr))
5504 return PTR_ERR(vr);
5505
Yuval Mintzeb35da02018-03-26 15:01:42 +03005506 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
Yotam Gigid42b0962017-09-27 08:23:20 +02005507 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
Yuval Mintzeb35da02018-03-26 15:01:42 +03005508 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
Yotam Gigid42b0962017-09-27 08:23:20 +02005509 ven_info->vif_index,
5510 ven_info->vif_flags, rif);
5511}
5512
5513static void
5514mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5515 struct vif_entry_notifier_info *ven_info)
5516{
Yuval Mintzeb35da02018-03-26 15:01:42 +03005517 struct mlxsw_sp_mr_table *mrt;
Yotam Gigid42b0962017-09-27 08:23:20 +02005518 struct mlxsw_sp_vr *vr;
5519
5520 if (mlxsw_sp->router->aborted)
5521 return;
5522
5523 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5524 if (WARN_ON(!vr))
5525 return;
5526
Yuval Mintzeb35da02018-03-26 15:01:42 +03005527 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
5528 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005529 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005530}
5531
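/* Install the abort trap for both IPv4 and IPv6, each protocol using its
 * own minimal LPM tree.
 */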
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005532static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5533{
5534 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5535 int err;
5536
5537 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5538 MLXSW_SP_LPM_TREE_MIN);
5539 if (err)
5540 return err;
5541
Yotam Gigid42b0962017-09-27 08:23:20 +02005542	/* The multicast router code does not need an abort trap because, by
5543	 * default, packets that don't match any routes are trapped to the CPU.
5544	 */
5545
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005546 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5547 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5548 MLXSW_SP_LPM_TREE_MIN + 1);
5549}
5550
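/* Flush all IPv4 entries from a FIB node. The node is freed together with
 * its last entry, hence the careful iteration below.
 */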
Ido Schimmel9aecce12017-02-09 10:28:42 +01005551static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5552 struct mlxsw_sp_fib_node *fib_node)
5553{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005554 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005555
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005556 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5557 common.list) {
5558 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005559
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005560 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5561 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005562 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005563 /* Break when entry list is empty and node was freed.
5564 * Otherwise, we'll access freed memory in the next
5565 * iteration.
5566 */
5567 if (do_break)
5568 break;
5569 }
5570}
5571
Ido Schimmel428b8512017-08-03 13:28:28 +02005572static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5573 struct mlxsw_sp_fib_node *fib_node)
5574{
5575 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5576
5577 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5578 common.list) {
5579 bool do_break = &tmp->common.list == &fib_node->entry_list;
5580
5581 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5582 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5583 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5584 if (do_break)
5585 break;
5586 }
5587}
5588
Ido Schimmel9aecce12017-02-09 10:28:42 +01005589static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5590 struct mlxsw_sp_fib_node *fib_node)
5591{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005592 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005593 case MLXSW_SP_L3_PROTO_IPV4:
5594 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5595 break;
5596 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005597 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005598 break;
5599 }
5600}
5601
Ido Schimmel76610eb2017-03-10 08:53:41 +01005602static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5603 struct mlxsw_sp_vr *vr,
5604 enum mlxsw_sp_l3proto proto)
5605{
5606 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5607 struct mlxsw_sp_fib_node *fib_node, *tmp;
5608
5609 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5610 bool do_break = &tmp->list == &fib->node_list;
5611
5612 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5613 if (do_break)
5614 break;
5615 }
5616}
5617
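/* Flush the multicast routing tables and the IPv4 and IPv6 FIBs of every
 * virtual router that is currently in use, e.g. when FIB offload is
 * aborted.
 */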
Ido Schimmelac571de2016-11-14 11:26:32 +01005618static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005619{
Yuval Mintz9742f862018-03-26 15:01:40 +03005620 int i, j;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005621
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005622 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005623 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005624
Ido Schimmel76610eb2017-03-10 08:53:41 +01005625 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005626 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005627
Yuval Mintz9742f862018-03-26 15:01:40 +03005628 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
5629 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005630 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005631
5632 /* If virtual router was only used for IPv4, then it's no
5633 * longer used.
5634 */
5635 if (!mlxsw_sp_vr_is_used(vr))
5636 continue;
5637 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005638 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005639}
5640
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005641static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005642{
5643 int err;
5644
Ido Schimmel9011b672017-05-16 19:38:25 +02005645 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005646 return;
5647 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005648 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005649 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005650 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5651 if (err)
5652 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5653}
5654
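/* FIB notifications arrive in atomic context, so the notifier info is
 * copied into a work item and the event is processed later in process
 * context.
 */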
Ido Schimmel30572242016-12-03 16:45:01 +01005655struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005656 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005657 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005658 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005659 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005660 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005661 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005662 struct mfc_entry_notifier_info men_info;
5663 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005664 };
Ido Schimmel30572242016-12-03 16:45:01 +01005665 struct mlxsw_sp *mlxsw_sp;
5666 unsigned long event;
5667};
5668
Ido Schimmel66a57632017-08-03 13:28:26 +02005669static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005670{
Ido Schimmel30572242016-12-03 16:45:01 +01005671 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005672 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005673 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005674 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005675 int err;
5676
Ido Schimmel30572242016-12-03 16:45:01 +01005677 /* Protect internal structures from changes */
5678 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005679 mlxsw_sp_span_respin(mlxsw_sp);
5680
Ido Schimmel30572242016-12-03 16:45:01 +01005681 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005682 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005683 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005684 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005685 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005686 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5687 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005688 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005689 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005690 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005691 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005692 break;
5693 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005694 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5695 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005696 break;
David Ahern1f279232017-10-27 17:37:14 -07005697 case FIB_EVENT_RULE_ADD:
5698		/* If we get here, a rule was added that we do not support.
5699		 * Just abort FIB offloading.
5700		 */
5701 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005702 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005703 case FIB_EVENT_NH_ADD: /* fall through */
5704 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005705 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5706 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005707 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5708 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005709 }
Ido Schimmel30572242016-12-03 16:45:01 +01005710 rtnl_unlock();
5711 kfree(fib_work);
5712}
5713
Ido Schimmel66a57632017-08-03 13:28:26 +02005714static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5715{
Ido Schimmel583419f2017-08-03 13:28:27 +02005716 struct mlxsw_sp_fib_event_work *fib_work =
5717 container_of(work, struct mlxsw_sp_fib_event_work, work);
5718 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel53b562d2018-06-15 16:23:36 +03005719 bool replace, append;
Ido Schimmel428b8512017-08-03 13:28:28 +02005720 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005721
5722 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005723 mlxsw_sp_span_respin(mlxsw_sp);
5724
Ido Schimmel583419f2017-08-03 13:28:27 +02005725 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005726 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
David Ahern5a15a1b2018-05-21 10:26:52 -07005727 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005728 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005729 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel53b562d2018-06-15 16:23:36 +03005730 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
Ido Schimmel428b8512017-08-03 13:28:28 +02005731 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel53b562d2018-06-15 16:23:36 +03005732 fib_work->fen6_info.rt, replace,
5733 append);
Ido Schimmel428b8512017-08-03 13:28:28 +02005734 if (err)
5735 mlxsw_sp_router_fib_abort(mlxsw_sp);
5736 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5737 break;
5738 case FIB_EVENT_ENTRY_DEL:
5739 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5740 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5741 break;
David Ahern1f279232017-10-27 17:37:14 -07005742 case FIB_EVENT_RULE_ADD:
5743		/* If we get here, a rule was added that we do not support.
5744		 * Just abort FIB offloading.
5745		 */
5746 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005747 break;
5748 }
5749 rtnl_unlock();
5750 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005751}
5752
Yotam Gigid42b0962017-09-27 08:23:20 +02005753static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5754{
5755 struct mlxsw_sp_fib_event_work *fib_work =
5756 container_of(work, struct mlxsw_sp_fib_event_work, work);
5757 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005758 bool replace;
5759 int err;
5760
5761 rtnl_lock();
5762 switch (fib_work->event) {
5763 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5764 case FIB_EVENT_ENTRY_ADD:
5765 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5766
5767 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5768 replace);
5769 if (err)
5770 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005771 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005772 break;
5773 case FIB_EVENT_ENTRY_DEL:
5774 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005775 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005776 break;
5777 case FIB_EVENT_VIF_ADD:
5778 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5779 &fib_work->ven_info);
5780 if (err)
5781 mlxsw_sp_router_fib_abort(mlxsw_sp);
5782 dev_put(fib_work->ven_info.dev);
5783 break;
5784 case FIB_EVENT_VIF_DEL:
5785 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5786 &fib_work->ven_info);
5787 dev_put(fib_work->ven_info.dev);
5788 break;
David Ahern1f279232017-10-27 17:37:14 -07005789 case FIB_EVENT_RULE_ADD:
5790		/* If we get here, a rule was added that we do not support.
5791		 * Just abort FIB offloading.
5792		 */
5793 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005794 break;
5795 }
5796 rtnl_unlock();
5797 kfree(fib_work);
5798}
5799
Ido Schimmel66a57632017-08-03 13:28:26 +02005800static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5801 struct fib_notifier_info *info)
5802{
David Ahern3c75f9b2017-10-18 15:01:38 -07005803 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005804 struct fib_nh_notifier_info *fnh_info;
5805
Ido Schimmel66a57632017-08-03 13:28:26 +02005806 switch (fib_work->event) {
5807 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5808 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5809 case FIB_EVENT_ENTRY_ADD: /* fall through */
5810 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005811 fen_info = container_of(info, struct fib_entry_notifier_info,
5812 info);
5813 fib_work->fen_info = *fen_info;
5814		/* Take a reference on the fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005815		 * freed while the work is queued. Release it afterwards.
5816		 */
5817 fib_info_hold(fib_work->fen_info.fi);
5818 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005819 case FIB_EVENT_NH_ADD: /* fall through */
5820 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005821 fnh_info = container_of(info, struct fib_nh_notifier_info,
5822 info);
5823 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005824 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5825 break;
5826 }
5827}
5828
5829static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5830 struct fib_notifier_info *info)
5831{
David Ahern3c75f9b2017-10-18 15:01:38 -07005832 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005833
Ido Schimmel583419f2017-08-03 13:28:27 +02005834 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005835 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
David Ahern5a15a1b2018-05-21 10:26:52 -07005836 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005837 case FIB_EVENT_ENTRY_ADD: /* fall through */
5838 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005839 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5840 info);
5841 fib_work->fen6_info = *fen6_info;
David Ahern8d1c8022018-04-17 17:33:26 -07005842 fib6_info_hold(fib_work->fen6_info.rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005843 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005844 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005845}
5846
Yotam Gigid42b0962017-09-27 08:23:20 +02005847static void
5848mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5849 struct fib_notifier_info *info)
5850{
5851 switch (fib_work->event) {
5852 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5853 case FIB_EVENT_ENTRY_ADD: /* fall through */
5854 case FIB_EVENT_ENTRY_DEL:
5855 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
Yuval Mintz8c13af22018-03-26 15:01:36 +03005856 mr_cache_hold(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005857 break;
5858 case FIB_EVENT_VIF_ADD: /* fall through */
5859 case FIB_EVENT_VIF_DEL:
5860 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5861 dev_hold(fib_work->ven_info.dev);
5862 break;
David Ahern1f279232017-10-27 17:37:14 -07005863 }
5864}
5865
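/* Check whether a FIB rule can be offloaded. Only default rules and l3mdev
 * rules are supported; anything else is reported via extack and results in
 * -EOPNOTSUPP.
 */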
5866static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5867 struct fib_notifier_info *info,
5868 struct mlxsw_sp *mlxsw_sp)
5869{
5870 struct netlink_ext_ack *extack = info->extack;
5871 struct fib_rule_notifier_info *fr_info;
5872 struct fib_rule *rule;
5873 int err = 0;
5874
5875 /* nothing to do at the moment */
5876 if (event == FIB_EVENT_RULE_DEL)
5877 return 0;
5878
5879 if (mlxsw_sp->router->aborted)
5880 return 0;
5881
5882 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5883 rule = fr_info->rule;
5884
5885 switch (info->family) {
5886 case AF_INET:
5887 if (!fib4_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005888 err = -EOPNOTSUPP;
David Ahern1f279232017-10-27 17:37:14 -07005889 break;
5890 case AF_INET6:
5891 if (!fib6_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005892 err = -EOPNOTSUPP;
David Ahern1f279232017-10-27 17:37:14 -07005893 break;
5894 case RTNL_FAMILY_IPMR:
5895 if (!ipmr_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005896 err = -EOPNOTSUPP;
Yotam Gigid42b0962017-09-27 08:23:20 +02005897 break;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005898 case RTNL_FAMILY_IP6MR:
5899 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
Ido Schimmel62901822018-05-02 10:17:34 +03005900 err = -EOPNOTSUPP;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005901 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005902 }
David Ahern1f279232017-10-27 17:37:14 -07005903
5904 if (err < 0)
Ido Schimmel62901822018-05-02 10:17:34 +03005905 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
David Ahern1f279232017-10-27 17:37:14 -07005906
5907 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005908}
5909
Ido Schimmel30572242016-12-03 16:45:01 +01005910/* Called with rcu_read_lock() */
5911static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5912 unsigned long event, void *ptr)
5913{
Ido Schimmel30572242016-12-03 16:45:01 +01005914 struct mlxsw_sp_fib_event_work *fib_work;
5915 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005916 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005917 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005918
Ido Schimmel8e29f972017-09-15 15:31:07 +02005919 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005920 (info->family != AF_INET && info->family != AF_INET6 &&
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005921 info->family != RTNL_FAMILY_IPMR &&
5922 info->family != RTNL_FAMILY_IP6MR))
Ido Schimmel30572242016-12-03 16:45:01 +01005923 return NOTIFY_DONE;
5924
David Ahern1f279232017-10-27 17:37:14 -07005925 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5926
5927 switch (event) {
5928 case FIB_EVENT_RULE_ADD: /* fall through */
5929 case FIB_EVENT_RULE_DEL:
5930 err = mlxsw_sp_router_fib_rule_event(event, info,
5931 router->mlxsw_sp);
Ido Schimmel62901822018-05-02 10:17:34 +03005932 if (!err || info->extack)
5933 return notifier_from_errno(err);
Ido Schimmel50d10712018-05-02 10:17:35 +03005934 break;
5935 case FIB_EVENT_ENTRY_ADD:
5936 if (router->aborted) {
5937 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
5938 return notifier_from_errno(-EINVAL);
5939 }
5940 break;
David Ahern1f279232017-10-27 17:37:14 -07005941 }
5942
Ido Schimmel30572242016-12-03 16:45:01 +01005943 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5944 if (WARN_ON(!fib_work))
5945 return NOTIFY_BAD;
5946
Ido Schimmel7e39d112017-05-16 19:38:28 +02005947 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005948 fib_work->event = event;
5949
Ido Schimmel66a57632017-08-03 13:28:26 +02005950 switch (info->family) {
5951 case AF_INET:
5952 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5953 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005954 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005955 case AF_INET6:
5956 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5957 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005958 break;
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005959 case RTNL_FAMILY_IP6MR:
Yotam Gigid42b0962017-09-27 08:23:20 +02005960 case RTNL_FAMILY_IPMR:
5961 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5962 mlxsw_sp_router_fibmr_event(fib_work, info);
5963 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005964 }
5965
Ido Schimmela0e47612017-02-06 16:20:10 +01005966 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005967
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005968 return NOTIFY_DONE;
5969}
5970
Ido Schimmel4724ba562017-03-10 08:53:39 +01005971static struct mlxsw_sp_rif *
5972mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5973 const struct net_device *dev)
5974{
5975 int i;
5976
5977 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005978 if (mlxsw_sp->router->rifs[i] &&
5979 mlxsw_sp->router->rifs[i]->dev == dev)
5980 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005981
5982 return NULL;
5983}
5984
5985static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5986{
5987 char ritr_pl[MLXSW_REG_RITR_LEN];
5988 int err;
5989
5990 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5991 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5992 if (WARN_ON_ONCE(err))
5993 return err;
5994
5995 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5996 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5997}
5998
5999static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006000 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006001{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006002 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6003 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6004 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006005}
6006
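/* Decide whether an address event should (de)configure a RIF: a RIF is
 * created when the first address is added (NETDEV_UP) and destroyed on
 * NETDEV_DOWN only once the netdev has neither IPv4 nor IPv6 addresses and
 * is not an L3 slave.
 */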
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006007static bool
6008mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6009 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006010{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006011 struct inet6_dev *inet6_dev;
6012 bool addr_list_empty = true;
6013 struct in_device *idev;
6014
Ido Schimmel4724ba562017-03-10 08:53:39 +01006015 switch (event) {
6016 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02006017 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006018 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006019 idev = __in_dev_get_rtnl(dev);
6020 if (idev && idev->ifa_list)
6021 addr_list_empty = false;
6022
6023 inet6_dev = __in6_dev_get(dev);
6024 if (addr_list_empty && inet6_dev &&
6025 !list_empty(&inet6_dev->addr_list))
6026 addr_list_empty = false;
6027
6028 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006029 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006030 return true;
6031 /* It is possible we already removed the RIF ourselves
6032 * if it was assigned to a netdev that is now a bridge
6033 * or LAG slave.
6034 */
6035 return false;
6036 }
6037
6038 return false;
6039}
6040
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006041static enum mlxsw_sp_rif_type
6042mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6043 const struct net_device *dev)
6044{
6045 enum mlxsw_sp_fid_type type;
6046
Petr Machata6ddb7422017-09-02 23:49:19 +02006047 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6048 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6049
6050 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006051 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6052 type = MLXSW_SP_FID_TYPE_8021Q;
6053 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6054 type = MLXSW_SP_FID_TYPE_8021Q;
6055 else if (netif_is_bridge_master(dev))
6056 type = MLXSW_SP_FID_TYPE_8021D;
6057 else
6058 type = MLXSW_SP_FID_TYPE_RFID;
6059
6060 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6061}
6062
Ido Schimmelde5ed992017-06-04 16:53:40 +02006063static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006064{
6065 int i;
6066
Ido Schimmelde5ed992017-06-04 16:53:40 +02006067 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6068 if (!mlxsw_sp->router->rifs[i]) {
6069 *p_rif_index = i;
6070 return 0;
6071 }
6072 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006073
Ido Schimmelde5ed992017-06-04 16:53:40 +02006074 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006075}
6076
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006077static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6078 u16 vr_id,
6079 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006080{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006081 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006082
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006083 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006084 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006085 return NULL;
6086
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006087 INIT_LIST_HEAD(&rif->nexthop_list);
6088 INIT_LIST_HEAD(&rif->neigh_list);
6089 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6090 rif->mtu = l3_dev->mtu;
6091 rif->vr_id = vr_id;
6092 rif->dev = l3_dev;
6093 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006094
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006095 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006096}
6097
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006098struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6099 u16 rif_index)
6100{
6101 return mlxsw_sp->router->rifs[rif_index];
6102}
6103
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006104u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6105{
6106 return rif->rif_index;
6107}
6108
Petr Machata92107cf2017-09-02 23:49:28 +02006109u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6110{
6111 return lb_rif->common.rif_index;
6112}
6113
6114u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6115{
6116 return lb_rif->ul_vr_id;
6117}
6118
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006119int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6120{
6121 return rif->dev->ifindex;
6122}
6123
Yotam Gigi91e4d592017-09-19 10:00:19 +02006124const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6125{
6126 return rif->dev;
6127}
6128
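/* Create a router interface (RIF) for a netdev: bind it to a virtual
 * router, allocate a free RIF index, take a reference on the relevant FID
 * (if any), configure the RIF in the device and add it to the multicast
 * routing tables.
 */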
Ido Schimmel4724ba562017-03-10 08:53:39 +01006129static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006130mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006131 const struct mlxsw_sp_rif_params *params,
6132 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006133{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006134 u32 tb_id = l3mdev_fib_table(params->dev);
6135 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02006136 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006137 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006138 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006139 struct mlxsw_sp_vr *vr;
6140 u16 rif_index;
Yuval Mintz9742f862018-03-26 15:01:40 +03006141 int i, err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006142
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006143 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6144 ops = mlxsw_sp->router->rif_ops_arr[type];
6145
David Ahernf8fa9b42017-10-18 09:56:56 -07006146 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006147 if (IS_ERR(vr))
6148 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006149 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006150
Ido Schimmelde5ed992017-06-04 16:53:40 +02006151 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006152 if (err) {
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01006153 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006154 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006155 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006156
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006157 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006158 if (!rif) {
6159 err = -ENOMEM;
6160 goto err_rif_alloc;
6161 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006162 rif->mlxsw_sp = mlxsw_sp;
6163 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006164
Petr Machata010cadf2017-09-02 23:49:18 +02006165 if (ops->fid_get) {
Petr Machata5f15e252018-06-25 10:48:13 +03006166 fid = ops->fid_get(rif, extack);
Petr Machata010cadf2017-09-02 23:49:18 +02006167 if (IS_ERR(fid)) {
6168 err = PTR_ERR(fid);
6169 goto err_fid_get;
6170 }
6171 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006172 }
6173
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006174 if (ops->setup)
6175 ops->setup(rif, params);
6176
6177 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006178 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006179 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006180
Yuval Mintz9742f862018-03-26 15:01:40 +03006181 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6182 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6183 if (err)
6184 goto err_mr_rif_add;
6185 }
Yotam Gigid42b0962017-09-27 08:23:20 +02006186
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006187 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006188 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006189
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006190 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006191
Yotam Gigid42b0962017-09-27 08:23:20 +02006192err_mr_rif_add:
Yuval Mintz9742f862018-03-26 15:01:40 +03006193 for (i--; i >= 0; i--)
6194 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006195 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006196err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006197 if (fid)
6198 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006199err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006200 kfree(rif);
6201err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006202err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006203 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006204 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006205 return ERR_PTR(err);
6206}
6207
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006208void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006209{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006210 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6211 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006212 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006213 struct mlxsw_sp_vr *vr;
Yuval Mintz9742f862018-03-26 15:01:40 +03006214 int i;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006215
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006216 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006217 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006218
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006219 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006220 mlxsw_sp_rif_counters_free(rif);
Yuval Mintz9742f862018-03-26 15:01:40 +03006221 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6222 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006223 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006224 if (fid)
6225 /* Loopback RIFs are not associated with a FID. */
6226 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006227 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006228 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006229 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006230}
6231
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006232static void
6233mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6234 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6235{
6236 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6237
6238 params->vid = mlxsw_sp_port_vlan->vid;
6239 params->lag = mlxsw_sp_port->lagged;
6240 if (params->lag)
6241 params->lag_id = mlxsw_sp_port->lag_id;
6242 else
6243 params->system_port = mlxsw_sp_port->local_port;
6244}
6245
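/* Join a {port, VID} to the router: create a sub-port RIF for the L3
 * device if one does not already exist, map the {port, VID} to the RIF's
 * FID and put the VID in forwarding state with learning disabled.
 */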
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006246static int
Ido Schimmela1107482017-05-26 08:37:39 +02006247mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006248 struct net_device *l3_dev,
6249 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006250{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006251 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006252 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006253 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006254 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006255 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006256 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006257
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006258 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006259 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006260 struct mlxsw_sp_rif_params params = {
6261 .dev = l3_dev,
6262 };
6263
6264 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006265 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006266 if (IS_ERR(rif))
6267 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006268 }
6269
Ido Schimmela1107482017-05-26 08:37:39 +02006270 /* FID was already created, just take a reference */
Petr Machata5f15e252018-06-25 10:48:13 +03006271 fid = rif->ops->fid_get(rif, extack);
Ido Schimmela1107482017-05-26 08:37:39 +02006272 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6273 if (err)
6274 goto err_fid_port_vid_map;
6275
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006276 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006277 if (err)
6278 goto err_port_vid_learning_set;
6279
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006280 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006281 BR_STATE_FORWARDING);
6282 if (err)
6283 goto err_port_vid_stp_set;
6284
Ido Schimmela1107482017-05-26 08:37:39 +02006285 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006286
Ido Schimmel4724ba562017-03-10 08:53:39 +01006287 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006288
6289err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006290 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006291err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006292 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6293err_fid_port_vid_map:
6294 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006295 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006296}
6297
Ido Schimmela1107482017-05-26 08:37:39 +02006298void
6299mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006300{
Ido Schimmelce95e152017-05-26 08:37:27 +02006301 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006302 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006303 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006304
Ido Schimmela1107482017-05-26 08:37:39 +02006305 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6306 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006307
Ido Schimmela1107482017-05-26 08:37:39 +02006308 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006309 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6310 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006311 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6312	/* If the router port holds the last reference on the rFID, then the
6313	 * associated Sub-port RIF will be destroyed.
6314	 */
6315 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006316}
6317
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006318static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6319 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006320 unsigned long event, u16 vid,
6321 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006322{
6323 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006324 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006325
Ido Schimmelce95e152017-05-26 08:37:27 +02006326 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006327 if (WARN_ON(!mlxsw_sp_port_vlan))
6328 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006329
6330 switch (event) {
6331 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006332 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006333 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006334 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006335 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006336 break;
6337 }
6338
6339 return 0;
6340}
6341
6342static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006343 unsigned long event,
6344 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006345{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006346 if (netif_is_bridge_port(port_dev) ||
6347 netif_is_lag_port(port_dev) ||
6348 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006349 return 0;
6350
David Ahernf8fa9b42017-10-18 09:56:56 -07006351 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6352 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006353}
6354
6355static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6356 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006357 unsigned long event, u16 vid,
6358 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006359{
6360 struct net_device *port_dev;
6361 struct list_head *iter;
6362 int err;
6363
6364 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6365 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006366 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6367 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006368 event, vid,
6369 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006370 if (err)
6371 return err;
6372 }
6373 }
6374
6375 return 0;
6376}
6377
6378static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006379 unsigned long event,
6380 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006381{
6382 if (netif_is_bridge_port(lag_dev))
6383 return 0;
6384
David Ahernf8fa9b42017-10-18 09:56:56 -07006385 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6386 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006387}
6388
Ido Schimmel4724ba562017-03-10 08:53:39 +01006389static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006390 unsigned long event,
6391 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006392{
6393 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006394 struct mlxsw_sp_rif_params params = {
6395 .dev = l3_dev,
6396 };
Ido Schimmela1107482017-05-26 08:37:39 +02006397 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006398
6399 switch (event) {
6400 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006401 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006402 if (IS_ERR(rif))
6403 return PTR_ERR(rif);
6404 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006405 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006406 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006407 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006408 break;
6409 }
6410
6411 return 0;
6412}
6413
6414static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006415 unsigned long event,
6416 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006417{
6418 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006419 u16 vid = vlan_dev_vlan_id(vlan_dev);
6420
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006421 if (netif_is_bridge_port(vlan_dev))
6422 return 0;
6423
Ido Schimmel4724ba562017-03-10 08:53:39 +01006424 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006425 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006426 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006427 else if (netif_is_lag_master(real_dev))
6428 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006429 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006430 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006431 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006432
6433 return 0;
6434}
6435
Ido Schimmelb1e45522017-04-30 19:47:14 +03006436static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006437 unsigned long event,
6438 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006439{
6440 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006441 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006442 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006443 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006444 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006445 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006446 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006447 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006448 else
6449 return 0;
6450}
6451
Ido Schimmel4724ba562017-03-10 08:53:39 +01006452int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6453 unsigned long event, void *ptr)
6454{
6455 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6456 struct net_device *dev = ifa->ifa_dev->dev;
6457 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006458 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006459 int err = 0;
6460
David Ahern89d5dd22017-10-18 09:56:55 -07006461 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6462 if (event == NETDEV_UP)
6463 goto out;
6464
6465 mlxsw_sp = mlxsw_sp_lower_get(dev);
6466 if (!mlxsw_sp)
6467 goto out;
6468
6469 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6470 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6471 goto out;
6472
David Ahernf8fa9b42017-10-18 09:56:56 -07006473 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006474out:
6475 return notifier_from_errno(err);
6476}
6477
6478int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6479 unsigned long event, void *ptr)
6480{
6481 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6482 struct net_device *dev = ivi->ivi_dev->dev;
6483 struct mlxsw_sp *mlxsw_sp;
6484 struct mlxsw_sp_rif *rif;
6485 int err = 0;
6486
Ido Schimmel4724ba562017-03-10 08:53:39 +01006487 mlxsw_sp = mlxsw_sp_lower_get(dev);
6488 if (!mlxsw_sp)
6489 goto out;
6490
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006491 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006492 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006493 goto out;
6494
David Ahernf8fa9b42017-10-18 09:56:56 -07006495 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006496out:
6497 return notifier_from_errno(err);
6498}
6499
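/* IPv6 address notifications are received in atomic context and are
 * therefore processed in a work item.
 */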
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
out:
	return notifier_from_errno(err);
}

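/* Update the MAC address and MTU of an existing RIF by reading back the
 * current RITR register entry, patching the relevant fields and writing
 * it again.
 */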
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

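/* Called when the address or MTU of a netdevice backing a RIF changes: the
 * old FDB entry is removed, the RIF is edited in hardware, a new FDB entry
 * is installed and the MTU change is propagated to the multicast routing
 * tables. Each step is rolled back on failure.
 */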
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

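/* VRF join/leave is implemented by replaying the inetaddr handlers: an
 * existing RIF is torn down with NETDEV_DOWN and recreated with NETDEV_UP
 * so that it is bound to the new virtual router.
 */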
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

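/* What follows are the mlxsw_sp_rif_ops implementations for the four RIF
 * types: sub-port (port/LAG VLANs), VLAN, FID (802.1D bridge) and IPIP
 * loopback.
 */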
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

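/* VLAN and FID RIFs are both programmed through the RITR VLAN/FID interface
 * types and flood router-bound traffic to the "router port", one past the
 * highest port number of the device.
 */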
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};

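/* FID RIFs mirror the VLAN variant, but use the RITR FID interface type and
 * an 802.1D FID keyed by the bridge device's ifindex.
 */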
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};

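/* IPIP loopback RIFs bind a tunnel to its underlay virtual router, which is
 * looked up (and reference counted) when the RIF is configured and released
 * again on deconfigure.
 */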
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

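/* The ECMP hash configuration mirrors the kernel's multipath hash policy:
 * L3 fields only by default, with L4 ports added when the IPv4/IPv6
 * multipath hash policy selects an L4 hash.
 */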
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(&init_net);

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* The HW determines switch priority from the DSCP bits, while the
	 * kernel still derives it from the full ToS byte. To bridge that
	 * mismatch, program the priority the corresponding ToS value would
	 * map to, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

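/* Top-level router init. The FIB notifier is registered last, once all of
 * the sub-blocks above are ready, so that the FIB dump it triggers replays
 * existing routes into a fully initialized router.
 */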
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}