blob: be241c708bb959dc5ea356111aec99bc507ff8b2 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
Petr Machatae437f3b2018-02-13 11:26:09 +01003 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
Ido Schimmel464dce12016-07-02 11:00:15 +02004 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Petr Machatae437f3b2018-02-13 11:26:09 +01007 * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02008 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/kernel.h>
39#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020040#include <linux/rhashtable.h>
41#include <linux/bitops.h>
42#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020043#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010044#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010045#include <linux/netdevice.h>
Ido Schimmel03ea01e2017-05-23 21:56:30 +020046#include <linux/if_bridge.h>
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +020047#include <linux/socket.h>
Ido Schimmel428b8512017-08-03 13:28:28 +020048#include <linux/route.h>
Ido Schimmeleb789982017-10-22 23:11:48 +020049#include <linux/gcd.h>
Ido Schimmelaf658b62017-11-02 17:14:09 +010050#include <linux/random.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020051#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020052#include <net/neighbour.h>
53#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020054#include <net/ip_fib.h>
Ido Schimmel583419f2017-08-03 13:28:27 +020055#include <net/ip6_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010056#include <net/fib_rules.h>
Petr Machata6ddb7422017-09-02 23:49:19 +020057#include <net/ip_tunnels.h>
Ido Schimmel57837882017-03-16 09:08:16 +010058#include <net/l3mdev.h>
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +020059#include <net/addrconf.h>
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +020060#include <net/ndisc.h>
61#include <net/ipv6.h>
Ido Schimmel04b1d4e2017-08-03 13:28:11 +020062#include <net/fib_notifier.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020063
64#include "spectrum.h"
65#include "core.h"
66#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020067#include "spectrum_cnt.h"
68#include "spectrum_dpipe.h"
Petr Machata38ebc0f2017-09-02 23:49:17 +020069#include "spectrum_ipip.h"
Yotam Gigid42b0962017-09-27 08:23:20 +020070#include "spectrum_mr.h"
71#include "spectrum_mr_tcam.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020072#include "spectrum_router.h"
Petr Machata803335a2018-02-27 14:53:46 +010073#include "spectrum_span.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020074
Ido Schimmel2b52ce02018-01-22 09:17:42 +010075struct mlxsw_sp_fib;
Ido Schimmel9011b672017-05-16 19:38:25 +020076struct mlxsw_sp_vr;
77struct mlxsw_sp_lpm_tree;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +020078struct mlxsw_sp_rif_ops;
Ido Schimmel9011b672017-05-16 19:38:25 +020079
80struct mlxsw_sp_router {
81 struct mlxsw_sp *mlxsw_sp;
Ido Schimmel5f9efff2017-05-16 19:38:27 +020082 struct mlxsw_sp_rif **rifs;
Ido Schimmel9011b672017-05-16 19:38:25 +020083 struct mlxsw_sp_vr *vrs;
84 struct rhashtable neigh_ht;
85 struct rhashtable nexthop_group_ht;
86 struct rhashtable nexthop_ht;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +020087 struct list_head nexthop_list;
Ido Schimmel9011b672017-05-16 19:38:25 +020088 struct {
Ido Schimmel2b52ce02018-01-22 09:17:42 +010089 /* One tree for each protocol: IPv4 and IPv6 */
90 struct mlxsw_sp_lpm_tree *proto_trees[2];
Ido Schimmel9011b672017-05-16 19:38:25 +020091 struct mlxsw_sp_lpm_tree *trees;
92 unsigned int tree_count;
93 } lpm;
94 struct {
95 struct delayed_work dw;
96 unsigned long interval; /* ms */
97 } neighs_update;
98 struct delayed_work nexthop_probe_dw;
99#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
100 struct list_head nexthop_neighs_list;
Petr Machata1012b9a2017-09-02 23:49:23 +0200101 struct list_head ipip_list;
Ido Schimmel9011b672017-05-16 19:38:25 +0200102 bool aborted;
Ido Schimmel7e39d112017-05-16 19:38:28 +0200103 struct notifier_block fib_nb;
Ido Schimmel48fac882017-11-02 17:14:06 +0100104 struct notifier_block netevent_nb;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200105 const struct mlxsw_sp_rif_ops **rif_ops_arr;
Petr Machata38ebc0f2017-09-02 23:49:17 +0200106 const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
Ido Schimmel9011b672017-05-16 19:38:25 +0200107};
108
Ido Schimmel4724ba562017-03-10 08:53:39 +0100109struct mlxsw_sp_rif {
110 struct list_head nexthop_list;
111 struct list_head neigh_list;
112 struct net_device *dev;
Ido Schimmela1107482017-05-26 08:37:39 +0200113 struct mlxsw_sp_fid *fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +0100114 unsigned char addr[ETH_ALEN];
115 int mtu;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100116 u16 rif_index;
Ido Schimmel69132292017-03-10 08:53:42 +0100117 u16 vr_id;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200118 const struct mlxsw_sp_rif_ops *ops;
119 struct mlxsw_sp *mlxsw_sp;
120
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200121 unsigned int counter_ingress;
122 bool counter_ingress_valid;
123 unsigned int counter_egress;
124 bool counter_egress_valid;
Ido Schimmel4724ba562017-03-10 08:53:39 +0100125};
126
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200127struct mlxsw_sp_rif_params {
128 struct net_device *dev;
129 union {
130 u16 system_port;
131 u16 lag_id;
132 };
133 u16 vid;
134 bool lag;
135};
136
Ido Schimmel4d93cee2017-05-26 08:37:34 +0200137struct mlxsw_sp_rif_subport {
138 struct mlxsw_sp_rif common;
139 union {
140 u16 system_port;
141 u16 lag_id;
142 };
143 u16 vid;
144 bool lag;
145};
146
Petr Machata6ddb7422017-09-02 23:49:19 +0200147struct mlxsw_sp_rif_ipip_lb {
148 struct mlxsw_sp_rif common;
149 struct mlxsw_sp_rif_ipip_lb_config lb_config;
150 u16 ul_vr_id; /* Reserved for Spectrum-2. */
151};
152
153struct mlxsw_sp_rif_params_ipip_lb {
154 struct mlxsw_sp_rif_params common;
155 struct mlxsw_sp_rif_ipip_lb_config lb_config;
156};
157
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200158struct mlxsw_sp_rif_ops {
159 enum mlxsw_sp_rif_type type;
160 size_t rif_size;
161
162 void (*setup)(struct mlxsw_sp_rif *rif,
163 const struct mlxsw_sp_rif_params *params);
164 int (*configure)(struct mlxsw_sp_rif *rif);
165 void (*deconfigure)(struct mlxsw_sp_rif *rif);
166 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
167};
168
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100169static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
170static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
171 struct mlxsw_sp_lpm_tree *lpm_tree);
172static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
173 const struct mlxsw_sp_fib *fib,
174 u8 tree_id);
175static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
176 const struct mlxsw_sp_fib *fib);
177
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200178static unsigned int *
179mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
180 enum mlxsw_sp_rif_counter_dir dir)
181{
182 switch (dir) {
183 case MLXSW_SP_RIF_COUNTER_EGRESS:
184 return &rif->counter_egress;
185 case MLXSW_SP_RIF_COUNTER_INGRESS:
186 return &rif->counter_ingress;
187 }
188 return NULL;
189}
190
191static bool
192mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
193 enum mlxsw_sp_rif_counter_dir dir)
194{
195 switch (dir) {
196 case MLXSW_SP_RIF_COUNTER_EGRESS:
197 return rif->counter_egress_valid;
198 case MLXSW_SP_RIF_COUNTER_INGRESS:
199 return rif->counter_ingress_valid;
200 }
201 return false;
202}
203
204static void
205mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
206 enum mlxsw_sp_rif_counter_dir dir,
207 bool valid)
208{
209 switch (dir) {
210 case MLXSW_SP_RIF_COUNTER_EGRESS:
211 rif->counter_egress_valid = valid;
212 break;
213 case MLXSW_SP_RIF_COUNTER_INGRESS:
214 rif->counter_ingress_valid = valid;
215 break;
216 }
217}
218
219static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
220 unsigned int counter_index, bool enable,
221 enum mlxsw_sp_rif_counter_dir dir)
222{
223 char ritr_pl[MLXSW_REG_RITR_LEN];
224 bool is_egress = false;
225 int err;
226
227 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
228 is_egress = true;
229 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
230 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
231 if (err)
232 return err;
233
234 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
235 is_egress);
236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
237}
238
239int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
240 struct mlxsw_sp_rif *rif,
241 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
242{
243 char ricnt_pl[MLXSW_REG_RICNT_LEN];
244 unsigned int *p_counter_index;
245 bool valid;
246 int err;
247
248 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
249 if (!valid)
250 return -EINVAL;
251
252 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
253 if (!p_counter_index)
254 return -EINVAL;
255 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
256 MLXSW_REG_RICNT_OPCODE_NOP);
257 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
258 if (err)
259 return err;
260 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
261 return 0;
262}
263
264static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
265 unsigned int counter_index)
266{
267 char ricnt_pl[MLXSW_REG_RICNT_LEN];
268
269 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
270 MLXSW_REG_RICNT_OPCODE_CLEAR);
271 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
272}
273
274int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
275 struct mlxsw_sp_rif *rif,
276 enum mlxsw_sp_rif_counter_dir dir)
277{
278 unsigned int *p_counter_index;
279 int err;
280
281 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
282 if (!p_counter_index)
283 return -EINVAL;
284 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
285 p_counter_index);
286 if (err)
287 return err;
288
289 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
290 if (err)
291 goto err_counter_clear;
292
293 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
294 *p_counter_index, true, dir);
295 if (err)
296 goto err_counter_edit;
297 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
298 return 0;
299
300err_counter_edit:
301err_counter_clear:
302 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
303 *p_counter_index);
304 return err;
305}
306
307void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
308 struct mlxsw_sp_rif *rif,
309 enum mlxsw_sp_rif_counter_dir dir)
310{
311 unsigned int *p_counter_index;
312
Arkadi Sharshevsky6b1206b2017-05-18 09:18:53 +0200313 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
314 return;
315
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200316 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
317 if (WARN_ON(!p_counter_index))
318 return;
319 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
320 *p_counter_index, false, dir);
321 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
322 *p_counter_index);
323 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
324}
325
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200326static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
327{
328 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
329 struct devlink *devlink;
330
331 devlink = priv_to_devlink(mlxsw_sp->core);
332 if (!devlink_dpipe_table_counter_enabled(devlink,
333 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
334 return;
335 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
336}
337
338static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
339{
340 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
341
342 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
343}
344
Ido Schimmel4724ba562017-03-10 08:53:39 +0100345static struct mlxsw_sp_rif *
346mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
347 const struct net_device *dev);
348
Ido Schimmel7dcc18a2017-07-18 10:10:30 +0200349#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
Ido Schimmel9011b672017-05-16 19:38:25 +0200350
351struct mlxsw_sp_prefix_usage {
352 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
353};
354
Jiri Pirko53342022016-07-04 08:23:08 +0200355#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
356 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
357
358static bool
359mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
360 struct mlxsw_sp_prefix_usage *prefix_usage2)
361{
362 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
363}
364
Jiri Pirko6b75c482016-07-04 08:23:09 +0200365static void
366mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
367 struct mlxsw_sp_prefix_usage *prefix_usage2)
368{
369 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
370}
371
372static void
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200373mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
374 unsigned char prefix_len)
375{
376 set_bit(prefix_len, prefix_usage->b);
377}
378
379static void
380mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
381 unsigned char prefix_len)
382{
383 clear_bit(prefix_len, prefix_usage->b);
384}
385
386struct mlxsw_sp_fib_key {
387 unsigned char addr[sizeof(struct in6_addr)];
388 unsigned char prefix_len;
389};
390
Jiri Pirko61c503f2016-07-04 08:23:11 +0200391enum mlxsw_sp_fib_entry_type {
392 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
393 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
394 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
Petr Machata4607f6d2017-09-02 23:49:25 +0200395
396 /* This is a special case of local delivery, where a packet should be
397 * decapsulated on reception. Note that there is no corresponding ENCAP,
398 * because that's a type of next hop, not of FIB entry. (There can be
399 * several next hops in a REMOTE entry, and some of them may be
400 * encapsulating entries.)
401 */
402 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
Jiri Pirko61c503f2016-07-04 08:23:11 +0200403};
404
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200405struct mlxsw_sp_nexthop_group;
406
Ido Schimmel9aecce12017-02-09 10:28:42 +0100407struct mlxsw_sp_fib_node {
408 struct list_head entry_list;
Jiri Pirkob45f64d2016-09-26 12:52:31 +0200409 struct list_head list;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100410 struct rhash_head ht_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100411 struct mlxsw_sp_fib *fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100412 struct mlxsw_sp_fib_key key;
413};
414
Petr Machata4607f6d2017-09-02 23:49:25 +0200415struct mlxsw_sp_fib_entry_decap {
416 struct mlxsw_sp_ipip_entry *ipip_entry;
417 u32 tunnel_index;
418};
419
Ido Schimmel9aecce12017-02-09 10:28:42 +0100420struct mlxsw_sp_fib_entry {
421 struct list_head list;
422 struct mlxsw_sp_fib_node *fib_node;
423 enum mlxsw_sp_fib_entry_type type;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200424 struct list_head nexthop_group_node;
425 struct mlxsw_sp_nexthop_group *nh_group;
Petr Machata4607f6d2017-09-02 23:49:25 +0200426 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200427};
428
Ido Schimmel4f1c7f12017-07-18 10:10:26 +0200429struct mlxsw_sp_fib4_entry {
430 struct mlxsw_sp_fib_entry common;
431 u32 tb_id;
432 u32 prio;
433 u8 tos;
434 u8 type;
435};
436
Ido Schimmel428b8512017-08-03 13:28:28 +0200437struct mlxsw_sp_fib6_entry {
438 struct mlxsw_sp_fib_entry common;
439 struct list_head rt6_list;
440 unsigned int nrt6;
441};
442
443struct mlxsw_sp_rt6 {
444 struct list_head list;
445 struct rt6_info *rt;
446};
447
Ido Schimmel9011b672017-05-16 19:38:25 +0200448struct mlxsw_sp_lpm_tree {
449 u8 id; /* tree ID */
450 unsigned int ref_count;
451 enum mlxsw_sp_l3proto proto;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100452 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
Ido Schimmel9011b672017-05-16 19:38:25 +0200453 struct mlxsw_sp_prefix_usage prefix_usage;
454};
455
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200456struct mlxsw_sp_fib {
457 struct rhashtable ht;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100458 struct list_head node_list;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100459 struct mlxsw_sp_vr *vr;
460 struct mlxsw_sp_lpm_tree *lpm_tree;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100461 enum mlxsw_sp_l3proto proto;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200462};
463
Ido Schimmel9011b672017-05-16 19:38:25 +0200464struct mlxsw_sp_vr {
465 u16 id; /* virtual router ID */
466 u32 tb_id; /* kernel fib table id */
467 unsigned int rif_count;
468 struct mlxsw_sp_fib *fib4;
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200469 struct mlxsw_sp_fib *fib6;
Yotam Gigid42b0962017-09-27 08:23:20 +0200470 struct mlxsw_sp_mr_table *mr4_table;
Ido Schimmel9011b672017-05-16 19:38:25 +0200471};
472
Ido Schimmel9aecce12017-02-09 10:28:42 +0100473static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200474
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100475static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
476 struct mlxsw_sp_vr *vr,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100477 enum mlxsw_sp_l3proto proto)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200478{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100479 struct mlxsw_sp_lpm_tree *lpm_tree;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200480 struct mlxsw_sp_fib *fib;
481 int err;
482
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100483 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200484 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
485 if (!fib)
486 return ERR_PTR(-ENOMEM);
487 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
488 if (err)
489 goto err_rhashtable_init;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100490 INIT_LIST_HEAD(&fib->node_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100491 fib->proto = proto;
492 fib->vr = vr;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100493 fib->lpm_tree = lpm_tree;
494 mlxsw_sp_lpm_tree_hold(lpm_tree);
495 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
496 if (err)
497 goto err_lpm_tree_bind;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200498 return fib;
499
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100500err_lpm_tree_bind:
501 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200502err_rhashtable_init:
503 kfree(fib);
504 return ERR_PTR(err);
505}
506
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100507static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
508 struct mlxsw_sp_fib *fib)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200509{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100510 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
511 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +0100512 WARN_ON(!list_empty(&fib->node_list));
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200513 rhashtable_destroy(&fib->ht);
514 kfree(fib);
515}
516
Jiri Pirko53342022016-07-04 08:23:08 +0200517static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100518mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200519{
520 static struct mlxsw_sp_lpm_tree *lpm_tree;
521 int i;
522
Ido Schimmel9011b672017-05-16 19:38:25 +0200523 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
524 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100525 if (lpm_tree->ref_count == 0)
526 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200527 }
528 return NULL;
529}
530
531static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
532 struct mlxsw_sp_lpm_tree *lpm_tree)
533{
534 char ralta_pl[MLXSW_REG_RALTA_LEN];
535
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200536 mlxsw_reg_ralta_pack(ralta_pl, true,
537 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
538 lpm_tree->id);
Jiri Pirko53342022016-07-04 08:23:08 +0200539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
540}
541
Ido Schimmelcc702672017-08-14 10:54:03 +0200542static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
543 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200544{
545 char ralta_pl[MLXSW_REG_RALTA_LEN];
546
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200547 mlxsw_reg_ralta_pack(ralta_pl, false,
548 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
549 lpm_tree->id);
Ido Schimmelcc702672017-08-14 10:54:03 +0200550 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
Jiri Pirko53342022016-07-04 08:23:08 +0200551}
552
553static int
554mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
555 struct mlxsw_sp_prefix_usage *prefix_usage,
556 struct mlxsw_sp_lpm_tree *lpm_tree)
557{
558 char ralst_pl[MLXSW_REG_RALST_LEN];
559 u8 root_bin = 0;
560 u8 prefix;
561 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
562
563 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
564 root_bin = prefix;
565
566 mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
567 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
568 if (prefix == 0)
569 continue;
570 mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
571 MLXSW_REG_RALST_BIN_NO_CHILD);
572 last_prefix = prefix;
573 }
574 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
575}
576
577static struct mlxsw_sp_lpm_tree *
578mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
579 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100580 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200581{
582 struct mlxsw_sp_lpm_tree *lpm_tree;
583 int err;
584
Ido Schimmel382dbb42017-03-10 08:53:40 +0100585 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
Jiri Pirko53342022016-07-04 08:23:08 +0200586 if (!lpm_tree)
587 return ERR_PTR(-EBUSY);
588 lpm_tree->proto = proto;
589 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
590 if (err)
591 return ERR_PTR(err);
592
593 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
594 lpm_tree);
595 if (err)
596 goto err_left_struct_set;
Jiri Pirko2083d362016-10-25 11:25:56 +0200597 memcpy(&lpm_tree->prefix_usage, prefix_usage,
598 sizeof(lpm_tree->prefix_usage));
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100599 memset(&lpm_tree->prefix_ref_count, 0,
600 sizeof(lpm_tree->prefix_ref_count));
601 lpm_tree->ref_count = 1;
Jiri Pirko53342022016-07-04 08:23:08 +0200602 return lpm_tree;
603
604err_left_struct_set:
605 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
606 return ERR_PTR(err);
607}
608
Ido Schimmelcc702672017-08-14 10:54:03 +0200609static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
610 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200611{
Ido Schimmelcc702672017-08-14 10:54:03 +0200612 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
Jiri Pirko53342022016-07-04 08:23:08 +0200613}
614
615static struct mlxsw_sp_lpm_tree *
616mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
617 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100618 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200619{
620 struct mlxsw_sp_lpm_tree *lpm_tree;
621 int i;
622
Ido Schimmel9011b672017-05-16 19:38:25 +0200623 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
624 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200625 if (lpm_tree->ref_count != 0 &&
626 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200627 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100628 prefix_usage)) {
629 mlxsw_sp_lpm_tree_hold(lpm_tree);
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200630 return lpm_tree;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100631 }
Jiri Pirko53342022016-07-04 08:23:08 +0200632 }
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200633 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
634}
Jiri Pirko53342022016-07-04 08:23:08 +0200635
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200636static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
637{
Jiri Pirko53342022016-07-04 08:23:08 +0200638 lpm_tree->ref_count++;
Jiri Pirko53342022016-07-04 08:23:08 +0200639}
640
Ido Schimmelcc702672017-08-14 10:54:03 +0200641static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
642 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200643{
644 if (--lpm_tree->ref_count == 0)
Ido Schimmelcc702672017-08-14 10:54:03 +0200645 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
Jiri Pirko53342022016-07-04 08:23:08 +0200646}
647
Ido Schimmeld7a60302017-06-08 08:47:43 +0200648#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
Ido Schimmel8494ab02017-03-24 08:02:47 +0100649
650static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200651{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100652 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
Jiri Pirko53342022016-07-04 08:23:08 +0200653 struct mlxsw_sp_lpm_tree *lpm_tree;
Ido Schimmel8494ab02017-03-24 08:02:47 +0100654 u64 max_trees;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100655 int err, i;
Jiri Pirko53342022016-07-04 08:23:08 +0200656
Ido Schimmel8494ab02017-03-24 08:02:47 +0100657 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
658 return -EIO;
659
660 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
Ido Schimmel9011b672017-05-16 19:38:25 +0200661 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
662 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
Ido Schimmel8494ab02017-03-24 08:02:47 +0100663 sizeof(struct mlxsw_sp_lpm_tree),
664 GFP_KERNEL);
Ido Schimmel9011b672017-05-16 19:38:25 +0200665 if (!mlxsw_sp->router->lpm.trees)
Ido Schimmel8494ab02017-03-24 08:02:47 +0100666 return -ENOMEM;
667
Ido Schimmel9011b672017-05-16 19:38:25 +0200668 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
669 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Jiri Pirko53342022016-07-04 08:23:08 +0200670 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
671 }
Ido Schimmel8494ab02017-03-24 08:02:47 +0100672
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100673 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
674 MLXSW_SP_L3_PROTO_IPV4);
675 if (IS_ERR(lpm_tree)) {
676 err = PTR_ERR(lpm_tree);
677 goto err_ipv4_tree_get;
678 }
679 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
680
681 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
682 MLXSW_SP_L3_PROTO_IPV6);
683 if (IS_ERR(lpm_tree)) {
684 err = PTR_ERR(lpm_tree);
685 goto err_ipv6_tree_get;
686 }
687 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
688
Ido Schimmel8494ab02017-03-24 08:02:47 +0100689 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100690
691err_ipv6_tree_get:
692 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
693 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
694err_ipv4_tree_get:
695 kfree(mlxsw_sp->router->lpm.trees);
696 return err;
Ido Schimmel8494ab02017-03-24 08:02:47 +0100697}
698
699static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
700{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100701 struct mlxsw_sp_lpm_tree *lpm_tree;
702
703 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
704 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
705
706 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
707 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
708
Ido Schimmel9011b672017-05-16 19:38:25 +0200709 kfree(mlxsw_sp->router->lpm.trees);
Jiri Pirko53342022016-07-04 08:23:08 +0200710}
711
Ido Schimmel76610eb2017-03-10 08:53:41 +0100712static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
713{
Yotam Gigid42b0962017-09-27 08:23:20 +0200714 return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100715}
716
Jiri Pirko6b75c482016-07-04 08:23:09 +0200717static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
718{
719 struct mlxsw_sp_vr *vr;
720 int i;
721
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200722 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200723 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100724 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200725 return vr;
726 }
727 return NULL;
728}
729
730static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0adb2142017-08-14 10:54:04 +0200731 const struct mlxsw_sp_fib *fib, u8 tree_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200732{
733 char raltb_pl[MLXSW_REG_RALTB_LEN];
734
Ido Schimmel76610eb2017-03-10 08:53:41 +0100735 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
736 (enum mlxsw_reg_ralxx_protocol) fib->proto,
Ido Schimmel0adb2142017-08-14 10:54:04 +0200737 tree_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200738 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
739}
740
741static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100742 const struct mlxsw_sp_fib *fib)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200743{
744 char raltb_pl[MLXSW_REG_RALTB_LEN];
745
746 /* Bind to tree 0 which is default */
Ido Schimmel76610eb2017-03-10 08:53:41 +0100747 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
748 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200749 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
750}
751
752static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
753{
Yotam Gigi7e50d432017-09-27 08:23:19 +0200754 /* For our purpose, squash main, default and local tables into one */
755 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200756 tb_id = RT_TABLE_MAIN;
757 return tb_id;
758}
759
760static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100761 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200762{
763 struct mlxsw_sp_vr *vr;
764 int i;
765
766 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200767
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200768 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200769 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100770 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200771 return vr;
772 }
773 return NULL;
774}
775
Ido Schimmel76610eb2017-03-10 08:53:41 +0100776static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
777 enum mlxsw_sp_l3proto proto)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200778{
Ido Schimmel76610eb2017-03-10 08:53:41 +0100779 switch (proto) {
780 case MLXSW_SP_L3_PROTO_IPV4:
781 return vr->fib4;
782 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200783 return vr->fib6;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100784 }
785 return NULL;
786}
787
788static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -0700789 u32 tb_id,
790 struct netlink_ext_ack *extack)
Ido Schimmel76610eb2017-03-10 08:53:41 +0100791{
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100792 struct mlxsw_sp_mr_table *mr4_table;
793 struct mlxsw_sp_fib *fib4;
794 struct mlxsw_sp_fib *fib6;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200795 struct mlxsw_sp_vr *vr;
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200796 int err;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200797
798 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
David Ahernf8fa9b42017-10-18 09:56:56 -0700799 if (!vr) {
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +0100800 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
Jiri Pirko6b75c482016-07-04 08:23:09 +0200801 return ERR_PTR(-EBUSY);
David Ahernf8fa9b42017-10-18 09:56:56 -0700802 }
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100803 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
804 if (IS_ERR(fib4))
805 return ERR_CAST(fib4);
806 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
807 if (IS_ERR(fib6)) {
808 err = PTR_ERR(fib6);
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200809 goto err_fib6_create;
810 }
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100811 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
812 MLXSW_SP_L3_PROTO_IPV4);
813 if (IS_ERR(mr4_table)) {
814 err = PTR_ERR(mr4_table);
Yotam Gigid42b0962017-09-27 08:23:20 +0200815 goto err_mr_table_create;
816 }
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100817 vr->fib4 = fib4;
818 vr->fib6 = fib6;
819 vr->mr4_table = mr4_table;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200820 vr->tb_id = tb_id;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200821 return vr;
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200822
Yotam Gigid42b0962017-09-27 08:23:20 +0200823err_mr_table_create:
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100824 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200825err_fib6_create:
Jiri Pirko0f2d2b22018-02-13 11:22:42 +0100826 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200827 return ERR_PTR(err);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200828}
829
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100830static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
831 struct mlxsw_sp_vr *vr)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200832{
Yotam Gigid42b0962017-09-27 08:23:20 +0200833 mlxsw_sp_mr_table_destroy(vr->mr4_table);
834 vr->mr4_table = NULL;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100835 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200836 vr->fib6 = NULL;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100837 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100838 vr->fib4 = NULL;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200839}
840
David Ahernf8fa9b42017-10-18 09:56:56 -0700841static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
842 struct netlink_ext_ack *extack)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200843{
844 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200845
846 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100847 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
848 if (!vr)
David Ahernf8fa9b42017-10-18 09:56:56 -0700849 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200850 return vr;
851}
852
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100853static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200854{
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200855 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
Yotam Gigid42b0962017-09-27 08:23:20 +0200856 list_empty(&vr->fib6->node_list) &&
857 mlxsw_sp_mr_table_empty(vr->mr4_table))
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100858 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200859}
860
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200861static bool
862mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
863 enum mlxsw_sp_l3proto proto, u8 tree_id)
864{
865 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
866
867 if (!mlxsw_sp_vr_is_used(vr))
868 return false;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100869 if (fib->lpm_tree->id == tree_id)
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200870 return true;
871 return false;
872}
873
874static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
875 struct mlxsw_sp_fib *fib,
876 struct mlxsw_sp_lpm_tree *new_tree)
877{
878 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
879 int err;
880
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200881 fib->lpm_tree = new_tree;
882 mlxsw_sp_lpm_tree_hold(new_tree);
Ido Schimmeled604c52018-01-18 15:42:10 +0100883 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
884 if (err)
885 goto err_tree_bind;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200886 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
887 return 0;
Ido Schimmeled604c52018-01-18 15:42:10 +0100888
889err_tree_bind:
890 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
891 fib->lpm_tree = old_tree;
892 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200893}
894
895static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
896 struct mlxsw_sp_fib *fib,
897 struct mlxsw_sp_lpm_tree *new_tree)
898{
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200899 enum mlxsw_sp_l3proto proto = fib->proto;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100900 struct mlxsw_sp_lpm_tree *old_tree;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200901 u8 old_id, new_id = new_tree->id;
902 struct mlxsw_sp_vr *vr;
903 int i, err;
904
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100905 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200906 old_id = old_tree->id;
907
908 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
909 vr = &mlxsw_sp->router->vrs[i];
910 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
911 continue;
912 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
913 mlxsw_sp_vr_fib(vr, proto),
914 new_tree);
915 if (err)
916 goto err_tree_replace;
917 }
918
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100919 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
920 sizeof(new_tree->prefix_ref_count));
921 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
922 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
923
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200924 return 0;
925
926err_tree_replace:
927 for (i--; i >= 0; i--) {
928 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
929 continue;
930 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
931 mlxsw_sp_vr_fib(vr, proto),
932 old_tree);
933 }
934 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200935}
936
Nogah Frankel9497c042016-09-20 11:16:54 +0200937static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200938{
939 struct mlxsw_sp_vr *vr;
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200940 u64 max_vrs;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200941 int i;
942
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200943 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
Nogah Frankel9497c042016-09-20 11:16:54 +0200944 return -EIO;
945
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200946 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
Ido Schimmel9011b672017-05-16 19:38:25 +0200947 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
948 GFP_KERNEL);
949 if (!mlxsw_sp->router->vrs)
Nogah Frankel9497c042016-09-20 11:16:54 +0200950 return -ENOMEM;
951
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200952 for (i = 0; i < max_vrs; i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200953 vr = &mlxsw_sp->router->vrs[i];
Jiri Pirko6b75c482016-07-04 08:23:09 +0200954 vr->id = i;
955 }
Nogah Frankel9497c042016-09-20 11:16:54 +0200956
957 return 0;
958}
959
Ido Schimmelac571de2016-11-14 11:26:32 +0100960static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
961
Nogah Frankel9497c042016-09-20 11:16:54 +0200962static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
963{
Ido Schimmel30572242016-12-03 16:45:01 +0100964 /* At this stage we're guaranteed not to have new incoming
965 * FIB notifications and the work queue is free from FIBs
966 * sitting on top of mlxsw netdevs. However, we can still
967 * have other FIBs queued. Flush the queue before flushing
968 * the device's tables. No need for locks, as we're the only
969 * writer.
970 */
971 mlxsw_core_flush_owq();
Ido Schimmelac571de2016-11-14 11:26:32 +0100972 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +0200973 kfree(mlxsw_sp->router->vrs);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200974}
975
Petr Machata6ddb7422017-09-02 23:49:19 +0200976static struct net_device *
977__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
978{
979 struct ip_tunnel *tun = netdev_priv(ol_dev);
980 struct net *net = dev_net(ol_dev);
981
982 return __dev_get_by_index(net, tun->parms.link);
983}
984
Petr Machata4cf04f32017-11-03 10:03:42 +0100985u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
Petr Machata6ddb7422017-09-02 23:49:19 +0200986{
987 struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
988
989 if (d)
990 return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
991 else
992 return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
993}
994
Petr Machata1012b9a2017-09-02 23:49:23 +0200995static struct mlxsw_sp_rif *
996mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -0700997 const struct mlxsw_sp_rif_params *params,
998 struct netlink_ext_ack *extack);
Petr Machata1012b9a2017-09-02 23:49:23 +0200999
1000static struct mlxsw_sp_rif_ipip_lb *
1001mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1002 enum mlxsw_sp_ipip_type ipipt,
Petr Machata7e75af62017-11-03 10:03:36 +01001003 struct net_device *ol_dev,
1004 struct netlink_ext_ack *extack)
Petr Machata1012b9a2017-09-02 23:49:23 +02001005{
1006 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1007 const struct mlxsw_sp_ipip_ops *ipip_ops;
1008 struct mlxsw_sp_rif *rif;
1009
1010 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1011 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1012 .common.dev = ol_dev,
1013 .common.lag = false,
1014 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1015 };
1016
Petr Machata7e75af62017-11-03 10:03:36 +01001017 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
Petr Machata1012b9a2017-09-02 23:49:23 +02001018 if (IS_ERR(rif))
1019 return ERR_CAST(rif);
1020 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1021}
1022
1023static struct mlxsw_sp_ipip_entry *
1024mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1025 enum mlxsw_sp_ipip_type ipipt,
1026 struct net_device *ol_dev)
1027{
Petr Machatae437f3b2018-02-13 11:26:09 +01001028 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata1012b9a2017-09-02 23:49:23 +02001029 struct mlxsw_sp_ipip_entry *ipip_entry;
1030 struct mlxsw_sp_ipip_entry *ret = NULL;
1031
Petr Machatae437f3b2018-02-13 11:26:09 +01001032 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
Petr Machata1012b9a2017-09-02 23:49:23 +02001033 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1034 if (!ipip_entry)
1035 return ERR_PTR(-ENOMEM);
1036
1037 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
Petr Machata7e75af62017-11-03 10:03:36 +01001038 ol_dev, NULL);
Petr Machata1012b9a2017-09-02 23:49:23 +02001039 if (IS_ERR(ipip_entry->ol_lb)) {
1040 ret = ERR_CAST(ipip_entry->ol_lb);
1041 goto err_ol_ipip_lb_create;
1042 }
1043
1044 ipip_entry->ipipt = ipipt;
1045 ipip_entry->ol_dev = ol_dev;
Petr Machatae437f3b2018-02-13 11:26:09 +01001046
1047 switch (ipip_ops->ul_proto) {
1048 case MLXSW_SP_L3_PROTO_IPV4:
1049 ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1050 break;
1051 case MLXSW_SP_L3_PROTO_IPV6:
1052 WARN_ON(1);
1053 break;
1054 }
Petr Machata1012b9a2017-09-02 23:49:23 +02001055
1056 return ipip_entry;
1057
1058err_ol_ipip_lb_create:
1059 kfree(ipip_entry);
1060 return ret;
1061}
1062
1063static void
Petr Machata4cccb732017-10-16 16:26:39 +02001064mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001065{
Petr Machata1012b9a2017-09-02 23:49:23 +02001066 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1067 kfree(ipip_entry);
1068}
1069
Petr Machata1012b9a2017-09-02 23:49:23 +02001070static bool
1071mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1072 const enum mlxsw_sp_l3proto ul_proto,
1073 union mlxsw_sp_l3addr saddr,
1074 u32 ul_tb_id,
1075 struct mlxsw_sp_ipip_entry *ipip_entry)
1076{
1077 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1078 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1079 union mlxsw_sp_l3addr tun_saddr;
1080
1081 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1082 return false;
1083
1084 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1085 return tun_ul_tb_id == ul_tb_id &&
1086 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1087}
1088
Petr Machata4607f6d2017-09-02 23:49:25 +02001089static int
1090mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1091 struct mlxsw_sp_fib_entry *fib_entry,
1092 struct mlxsw_sp_ipip_entry *ipip_entry)
1093{
1094 u32 tunnel_index;
1095 int err;
1096
1097 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
1098 if (err)
1099 return err;
1100
1101 ipip_entry->decap_fib_entry = fib_entry;
1102 fib_entry->decap.ipip_entry = ipip_entry;
1103 fib_entry->decap.tunnel_index = tunnel_index;
1104 return 0;
1105}
1106
1107static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1108 struct mlxsw_sp_fib_entry *fib_entry)
1109{
1110 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1111 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1112 fib_entry->decap.ipip_entry = NULL;
1113 mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
1114}
1115
Petr Machata1cc38fb2017-09-02 23:49:26 +02001116static struct mlxsw_sp_fib_node *
1117mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1118 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001119static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1120 struct mlxsw_sp_fib_entry *fib_entry);
1121
1122static void
1123mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1124 struct mlxsw_sp_ipip_entry *ipip_entry)
1125{
1126 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1127
1128 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1129 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1130
1131 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1132}
1133
Petr Machata1cc38fb2017-09-02 23:49:26 +02001134static void
1135mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1136 struct mlxsw_sp_ipip_entry *ipip_entry,
1137 struct mlxsw_sp_fib_entry *decap_fib_entry)
1138{
1139 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1140 ipip_entry))
1141 return;
1142 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1143
1144 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1145 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1146}
1147
1148/* Given an IPIP entry, find the corresponding decap route. */
1149static struct mlxsw_sp_fib_entry *
1150mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1151 struct mlxsw_sp_ipip_entry *ipip_entry)
1152{
1153 static struct mlxsw_sp_fib_node *fib_node;
1154 const struct mlxsw_sp_ipip_ops *ipip_ops;
1155 struct mlxsw_sp_fib_entry *fib_entry;
1156 unsigned char saddr_prefix_len;
1157 union mlxsw_sp_l3addr saddr;
1158 struct mlxsw_sp_fib *ul_fib;
1159 struct mlxsw_sp_vr *ul_vr;
1160 const void *saddrp;
1161 size_t saddr_len;
1162 u32 ul_tb_id;
1163 u32 saddr4;
1164
1165 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1166
1167 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1168 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1169 if (!ul_vr)
1170 return NULL;
1171
1172 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1173 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1174 ipip_entry->ol_dev);
1175
1176 switch (ipip_ops->ul_proto) {
1177 case MLXSW_SP_L3_PROTO_IPV4:
1178 saddr4 = be32_to_cpu(saddr.addr4);
1179 saddrp = &saddr4;
1180 saddr_len = 4;
1181 saddr_prefix_len = 32;
1182 break;
1183 case MLXSW_SP_L3_PROTO_IPV6:
1184 WARN_ON(1);
1185 return NULL;
1186 }
1187
1188 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1189 saddr_prefix_len);
1190 if (!fib_node || list_empty(&fib_node->entry_list))
1191 return NULL;
1192
1193 fib_entry = list_first_entry(&fib_node->entry_list,
1194 struct mlxsw_sp_fib_entry, list);
1195 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1196 return NULL;
1197
1198 return fib_entry;
1199}
1200
Petr Machata1012b9a2017-09-02 23:49:23 +02001201static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001202mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1203 enum mlxsw_sp_ipip_type ipipt,
1204 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001205{
Petr Machata1012b9a2017-09-02 23:49:23 +02001206 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02001207
1208 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1209 if (IS_ERR(ipip_entry))
1210 return ipip_entry;
1211
1212 list_add_tail(&ipip_entry->ipip_list_node,
1213 &mlxsw_sp->router->ipip_list);
1214
Petr Machata1012b9a2017-09-02 23:49:23 +02001215 return ipip_entry;
1216}
1217
1218static void
Petr Machata4cccb732017-10-16 16:26:39 +02001219mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1220 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001221{
Petr Machata4cccb732017-10-16 16:26:39 +02001222 list_del(&ipip_entry->ipip_list_node);
1223 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
Petr Machata1012b9a2017-09-02 23:49:23 +02001224}
1225
Petr Machata4607f6d2017-09-02 23:49:25 +02001226static bool
1227mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1228 const struct net_device *ul_dev,
1229 enum mlxsw_sp_l3proto ul_proto,
1230 union mlxsw_sp_l3addr ul_dip,
1231 struct mlxsw_sp_ipip_entry *ipip_entry)
1232{
1233 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1234 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1235 struct net_device *ipip_ul_dev;
1236
1237 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1238 return false;
1239
1240 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1241 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1242 ul_tb_id, ipip_entry) &&
1243 (!ipip_ul_dev || ipip_ul_dev == ul_dev);
1244}
1245
1246/* Given decap parameters, find the corresponding IPIP entry. */
1247static struct mlxsw_sp_ipip_entry *
1248mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1249 const struct net_device *ul_dev,
1250 enum mlxsw_sp_l3proto ul_proto,
1251 union mlxsw_sp_l3addr ul_dip)
1252{
1253 struct mlxsw_sp_ipip_entry *ipip_entry;
1254
1255 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1256 ipip_list_node)
1257 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1258 ul_proto, ul_dip,
1259 ipip_entry))
1260 return ipip_entry;
1261
1262 return NULL;
1263}
1264
Petr Machata6698c162017-10-16 16:26:36 +02001265static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1266 const struct net_device *dev,
1267 enum mlxsw_sp_ipip_type *p_type)
1268{
1269 struct mlxsw_sp_router *router = mlxsw_sp->router;
1270 const struct mlxsw_sp_ipip_ops *ipip_ops;
1271 enum mlxsw_sp_ipip_type ipipt;
1272
1273 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1274 ipip_ops = router->ipip_ops_arr[ipipt];
1275 if (dev->type == ipip_ops->dev_type) {
1276 if (p_type)
1277 *p_type = ipipt;
1278 return true;
1279 }
1280 }
1281 return false;
1282}
1283
Petr Machata796ec772017-11-03 10:03:29 +01001284bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1285 const struct net_device *dev)
Petr Machata00635872017-10-16 16:26:37 +02001286{
1287 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1288}
1289
1290static struct mlxsw_sp_ipip_entry *
1291mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1292 const struct net_device *ol_dev)
1293{
1294 struct mlxsw_sp_ipip_entry *ipip_entry;
1295
1296 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1297 ipip_list_node)
1298 if (ipip_entry->ol_dev == ol_dev)
1299 return ipip_entry;
1300
1301 return NULL;
1302}
1303
Petr Machata61481f22017-11-03 10:03:41 +01001304static struct mlxsw_sp_ipip_entry *
1305mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1306 const struct net_device *ul_dev,
1307 struct mlxsw_sp_ipip_entry *start)
1308{
1309 struct mlxsw_sp_ipip_entry *ipip_entry;
1310
1311 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1312 ipip_list_node);
1313 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1314 ipip_list_node) {
1315 struct net_device *ipip_ul_dev =
1316 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1317
1318 if (ipip_ul_dev == ul_dev)
1319 return ipip_entry;
1320 }
1321
1322 return NULL;
1323}
1324
1325bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1326 const struct net_device *dev)
1327{
1328 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1329}
1330
Petr Machatacafdb2a2017-11-03 10:03:30 +01001331static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1332 const struct net_device *ol_dev,
1333 enum mlxsw_sp_ipip_type ipipt)
1334{
1335 const struct mlxsw_sp_ipip_ops *ops
1336 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1337
1338 /* For deciding whether decap should be offloaded, we don't care about
1339 * overlay protocol, so ask whether either one is supported.
1340 */
1341 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1342 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1343}
1344
Petr Machata796ec772017-11-03 10:03:29 +01001345static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1346 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001347{
Petr Machata00635872017-10-16 16:26:37 +02001348 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001349 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001350 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001351 union mlxsw_sp_l3addr saddr;
1352 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001353
1354 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001355 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001356 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1357 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1358 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1359 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1360 saddr, ul_tb_id,
1361 NULL)) {
1362 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1363 ol_dev);
1364 if (IS_ERR(ipip_entry))
1365 return PTR_ERR(ipip_entry);
1366 }
Petr Machata00635872017-10-16 16:26:37 +02001367 }
1368
1369 return 0;
1370}
1371
Petr Machata796ec772017-11-03 10:03:29 +01001372static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1373 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001374{
1375 struct mlxsw_sp_ipip_entry *ipip_entry;
1376
1377 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1378 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001379 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001380}
1381
Petr Machata47518ca2017-11-03 10:03:35 +01001382static void
1383mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1384 struct mlxsw_sp_ipip_entry *ipip_entry)
1385{
1386 struct mlxsw_sp_fib_entry *decap_fib_entry;
1387
1388 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1389 if (decap_fib_entry)
1390 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1391 decap_fib_entry);
1392}
1393
Petr Machata6d4de442017-11-03 10:03:34 +01001394static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1395 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001396{
Petr Machata00635872017-10-16 16:26:37 +02001397 struct mlxsw_sp_ipip_entry *ipip_entry;
1398
1399 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001400 if (ipip_entry)
1401 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001402}
1403
Petr Machataa3fe1982017-11-03 10:03:33 +01001404static void
1405mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1406 struct mlxsw_sp_ipip_entry *ipip_entry)
1407{
1408 if (ipip_entry->decap_fib_entry)
1409 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1410}
1411
Petr Machata796ec772017-11-03 10:03:29 +01001412static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1413 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001414{
1415 struct mlxsw_sp_ipip_entry *ipip_entry;
1416
1417 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001418 if (ipip_entry)
1419 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001420}
1421
Petr Machata09dbf622017-11-28 13:17:14 +01001422static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1423 struct mlxsw_sp_rif *old_rif,
1424 struct mlxsw_sp_rif *new_rif);
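/* Replace the loopback RIF that backs an IPIP entry. With keep_encap, next
 * hops that point at the old RIF are first migrated to the new one, so that
 * encapsulation keeps being offloaded across the replacement.
 */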
Petr Machata65a61212017-11-03 10:03:37 +01001425static int
1426mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1427 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001428 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001429 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001430{
Petr Machata65a61212017-11-03 10:03:37 +01001431 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1432 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001433
Petr Machata65a61212017-11-03 10:03:37 +01001434 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1435 ipip_entry->ipipt,
1436 ipip_entry->ol_dev,
1437 extack);
1438 if (IS_ERR(new_lb_rif))
1439 return PTR_ERR(new_lb_rif);
1440 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001441
Petr Machata09dbf622017-11-28 13:17:14 +01001442 if (keep_encap)
1443 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1444 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001445
Petr Machata65a61212017-11-03 10:03:37 +01001446 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001447
Petr Machata65a61212017-11-03 10:03:37 +01001448 return 0;
1449}
1450
Petr Machata09dbf622017-11-28 13:17:14 +01001451static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1452 struct mlxsw_sp_rif *rif);
1453
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001454/*
 1455 * Update the offloads related to an IPIP entry. This always updates decap,
 1456 * and depending on the arguments, it also:
1457 * @recreate_loopback: recreates the associated loopback RIF
1458 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1459 * relevant when recreate_loopback is true.
1460 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1461 * is only relevant when recreate_loopback is false.
1462 */
Petr Machata65a61212017-11-03 10:03:37 +01001463int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1464 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001465 bool recreate_loopback,
1466 bool keep_encap,
1467 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001468 struct netlink_ext_ack *extack)
1469{
1470 int err;
1471
1472 /* RIFs can't be edited, so to update loopback, we need to destroy and
1473 * recreate it. That creates a window of opportunity where RALUE and
1474 * RATR registers end up referencing a RIF that's already gone. RATRs
1475 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001476 * of RALUE, demote the decap route back.
1477 */
1478 if (ipip_entry->decap_fib_entry)
1479 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1480
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001481 if (recreate_loopback) {
1482 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1483 keep_encap, extack);
1484 if (err)
1485 return err;
1486 } else if (update_nexthops) {
1487 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1488 &ipip_entry->ol_lb->common);
1489 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001490
Petr Machata65a61212017-11-03 10:03:37 +01001491 if (ipip_entry->ol_dev->flags & IFF_UP)
1492 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001493
1494 return 0;
1495}
1496
Petr Machata65a61212017-11-03 10:03:37 +01001497static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1498 struct net_device *ol_dev,
1499 struct netlink_ext_ack *extack)
1500{
1501 struct mlxsw_sp_ipip_entry *ipip_entry =
1502 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001503 enum mlxsw_sp_l3proto ul_proto;
1504 union mlxsw_sp_l3addr saddr;
1505 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001506
1507 if (!ipip_entry)
1508 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001509
1510 /* For flat configuration cases, moving overlay to a different VRF might
1511 * cause local address conflict, and the conflicting tunnels need to be
1512 * demoted.
1513 */
1514 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1515 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1516 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1517 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1518 saddr, ul_tb_id,
1519 ipip_entry)) {
1520 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1521 return 0;
1522 }
1523
Petr Machata65a61212017-11-03 10:03:37 +01001524 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001525 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001526}
1527
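/* The underlay device was moved to a different VRF. Recreate the loopback
 * RIF, which is bound to the underlay table, and keep the encap next hops by
 * migrating them to the new RIF.
 */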
Petr Machata61481f22017-11-03 10:03:41 +01001528static int
1529mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1530 struct mlxsw_sp_ipip_entry *ipip_entry,
1531 struct net_device *ul_dev,
1532 struct netlink_ext_ack *extack)
1533{
1534 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1535 true, true, false, extack);
1536}
1537
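/* An underlay device coming up only affects forwarding of encapsulated
 * packets, so refresh the next hops that use the tunnel and keep the current
 * loopback RIF.
 */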
Petr Machata4cf04f32017-11-03 10:03:42 +01001538static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001539mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1540 struct mlxsw_sp_ipip_entry *ipip_entry,
1541 struct net_device *ul_dev)
1542{
1543 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1544 false, false, true, NULL);
1545}
1546
1547static int
1548mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1549 struct mlxsw_sp_ipip_entry *ipip_entry,
1550 struct net_device *ul_dev)
1551{
1552 /* A down underlay device causes encapsulated packets to not be
1553 * forwarded, but decap still works. So refresh next hops without
1554 * touching anything else.
1555 */
1556 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1557 false, false, true, NULL);
1558}
1559
1560static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001561mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1562 struct net_device *ol_dev,
1563 struct netlink_ext_ack *extack)
1564{
1565 const struct mlxsw_sp_ipip_ops *ipip_ops;
1566 struct mlxsw_sp_ipip_entry *ipip_entry;
1567 int err;
1568
1569 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1570 if (!ipip_entry)
1571 /* A change might make a tunnel eligible for offloading, but
1572 * that is currently not implemented. What falls to slow path
1573 * stays there.
1574 */
1575 return 0;
1576
1577 /* A change might make a tunnel not eligible for offloading. */
1578 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1579 ipip_entry->ipipt)) {
1580 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1581 return 0;
1582 }
1583
1584 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1585 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1586 return err;
1587}
1588
Petr Machataaf641712017-11-03 10:03:40 +01001589void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1590 struct mlxsw_sp_ipip_entry *ipip_entry)
1591{
1592 struct net_device *ol_dev = ipip_entry->ol_dev;
1593
1594 if (ol_dev->flags & IFF_UP)
1595 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1596 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1597}
1598
1599/* The configuration where several tunnels have the same local address in the
1600 * same underlay table needs special treatment in the HW. That is currently not
1601 * implemented in the driver. This function finds and demotes the first tunnel
 1602 * with a given source address, except the one passed in via the argument
1603 * `except'.
1604 */
1605bool
1606mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1607 enum mlxsw_sp_l3proto ul_proto,
1608 union mlxsw_sp_l3addr saddr,
1609 u32 ul_tb_id,
1610 const struct mlxsw_sp_ipip_entry *except)
1611{
1612 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1613
1614 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1615 ipip_list_node) {
1616 if (ipip_entry != except &&
1617 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1618 ul_tb_id, ipip_entry)) {
1619 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1620 return true;
1621 }
1622 }
1623
1624 return false;
1625}
1626
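/* Demote all tunnels whose underlay device is ul_dev. Used as a fallback when
 * an event on the underlay device could not be handled for one of them.
 */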
Petr Machata61481f22017-11-03 10:03:41 +01001627static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1628 struct net_device *ul_dev)
1629{
1630 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1631
1632 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1633 ipip_list_node) {
1634 struct net_device *ipip_ul_dev =
1635 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1636
1637 if (ipip_ul_dev == ul_dev)
1638 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1639 }
1640}
1641
Petr Machata7e75af62017-11-03 10:03:36 +01001642int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1643 struct net_device *ol_dev,
1644 unsigned long event,
1645 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001646{
Petr Machata7e75af62017-11-03 10:03:36 +01001647 struct netdev_notifier_changeupper_info *chup;
1648 struct netlink_ext_ack *extack;
1649
Petr Machata00635872017-10-16 16:26:37 +02001650 switch (event) {
1651 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001652 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001653 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001654 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001655 return 0;
1656 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001657 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1658 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001659 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001660 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001661 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001662 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001663 chup = container_of(info, typeof(*chup), info);
1664 extack = info->extack;
1665 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001666 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001667 ol_dev,
1668 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001669 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001670 case NETDEV_CHANGE:
1671 extack = info->extack;
1672 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1673 ol_dev, extack);
Petr Machata00635872017-10-16 16:26:37 +02001674 }
1675 return 0;
1676}
1677
Petr Machata61481f22017-11-03 10:03:41 +01001678static int
1679__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1680 struct mlxsw_sp_ipip_entry *ipip_entry,
1681 struct net_device *ul_dev,
1682 unsigned long event,
1683 struct netdev_notifier_info *info)
1684{
1685 struct netdev_notifier_changeupper_info *chup;
1686 struct netlink_ext_ack *extack;
1687
1688 switch (event) {
1689 case NETDEV_CHANGEUPPER:
1690 chup = container_of(info, typeof(*chup), info);
1691 extack = info->extack;
1692 if (netif_is_l3_master(chup->upper_dev))
1693 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1694 ipip_entry,
1695 ul_dev,
1696 extack);
1697 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001698
1699 case NETDEV_UP:
1700 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1701 ul_dev);
1702 case NETDEV_DOWN:
1703 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1704 ipip_entry,
1705 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001706 }
1707 return 0;
1708}
1709
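/* Dispatch an event on an underlay device to every IPIP entry whose tunnel
 * uses that device as underlay. If handling fails for any of them, all
 * tunnels on this underlay are demoted to the slow path.
 */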
1710int
1711mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1712 struct net_device *ul_dev,
1713 unsigned long event,
1714 struct netdev_notifier_info *info)
1715{
1716 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1717 int err;
1718
1719 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1720 ul_dev,
1721 ipip_entry))) {
1722 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1723 ul_dev, event, info);
1724 if (err) {
1725 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1726 ul_dev);
1727 return err;
1728 }
1729 }
1730
1731 return 0;
1732}
1733
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001734struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001735 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001736};
1737
1738struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001739 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001740 struct rhash_head ht_node;
1741 struct mlxsw_sp_neigh_key key;
1742 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001743 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001744 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001745 struct list_head nexthop_list; /* list of nexthops using
1746 * this neigh entry
1747 */
Yotam Gigib2157142016-07-05 11:27:51 +02001748 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001749 unsigned int counter_index;
1750 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001751};
1752
1753static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1754 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1755 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1756 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1757};
1758
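/* Walk the neighbour entries of a RIF: pass NULL to get the first entry and
 * the previous entry to get the next one; NULL is returned past the end. Used
 * by dumps such as the dpipe host tables.
 */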
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001759struct mlxsw_sp_neigh_entry *
1760mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1761 struct mlxsw_sp_neigh_entry *neigh_entry)
1762{
1763 if (!neigh_entry) {
1764 if (list_empty(&rif->neigh_list))
1765 return NULL;
1766 else
1767 return list_first_entry(&rif->neigh_list,
1768 typeof(*neigh_entry),
1769 rif_list_node);
1770 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001771 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001772 return NULL;
1773 return list_next_entry(neigh_entry, rif_list_node);
1774}
1775
1776int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1777{
1778 return neigh_entry->key.n->tbl->family;
1779}
1780
1781unsigned char *
1782mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1783{
1784 return neigh_entry->ha;
1785}
1786
1787u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1788{
1789 struct neighbour *n;
1790
1791 n = neigh_entry->key.n;
1792 return ntohl(*((__be32 *) n->primary_key));
1793}
1794
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001795struct in6_addr *
1796mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1797{
1798 struct neighbour *n;
1799
1800 n = neigh_entry->key.n;
1801 return (struct in6_addr *) &n->primary_key;
1802}
1803
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001804int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1805 struct mlxsw_sp_neigh_entry *neigh_entry,
1806 u64 *p_counter)
1807{
1808 if (!neigh_entry->counter_valid)
1809 return -EINVAL;
1810
1811 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1812 p_counter, NULL);
1813}
1814
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001815static struct mlxsw_sp_neigh_entry *
1816mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1817 u16 rif)
1818{
1819 struct mlxsw_sp_neigh_entry *neigh_entry;
1820
1821 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1822 if (!neigh_entry)
1823 return NULL;
1824
1825 neigh_entry->key.n = n;
1826 neigh_entry->rif = rif;
1827 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1828
1829 return neigh_entry;
1830}
1831
1832static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1833{
1834 kfree(neigh_entry);
1835}
1836
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001837static int
1838mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1839 struct mlxsw_sp_neigh_entry *neigh_entry)
1840{
Ido Schimmel9011b672017-05-16 19:38:25 +02001841 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001842 &neigh_entry->ht_node,
1843 mlxsw_sp_neigh_ht_params);
1844}
1845
1846static void
1847mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1848 struct mlxsw_sp_neigh_entry *neigh_entry)
1849{
Ido Schimmel9011b672017-05-16 19:38:25 +02001850 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001851 &neigh_entry->ht_node,
1852 mlxsw_sp_neigh_ht_params);
1853}
1854
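/* A flow counter is allocated for a neighbour entry only if counters were
 * enabled for the matching dpipe host table (host4 or host6).
 */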
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001855static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001856mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1857 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001858{
1859 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001860 const char *table_name;
1861
1862 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1863 case AF_INET:
1864 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1865 break;
1866 case AF_INET6:
1867 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1868 break;
1869 default:
1870 WARN_ON(1);
1871 return false;
1872 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001873
1874 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001875 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001876}
1877
1878static void
1879mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1880 struct mlxsw_sp_neigh_entry *neigh_entry)
1881{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001882 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001883 return;
1884
1885 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1886 return;
1887
1888 neigh_entry->counter_valid = true;
1889}
1890
1891static void
1892mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1893 struct mlxsw_sp_neigh_entry *neigh_entry)
1894{
1895 if (!neigh_entry->counter_valid)
1896 return;
1897 mlxsw_sp_flow_counter_free(mlxsw_sp,
1898 neigh_entry->counter_index);
1899 neigh_entry->counter_valid = false;
1900}
1901
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001902static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001903mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001904{
1905 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001906 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001907 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001908
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001909 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1910 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001911 return ERR_PTR(-EINVAL);
1912
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001913 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001914 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001915 return ERR_PTR(-ENOMEM);
1916
1917 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1918 if (err)
1919 goto err_neigh_entry_insert;
1920
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001921 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001922 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001923
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001924 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001925
1926err_neigh_entry_insert:
1927 mlxsw_sp_neigh_entry_free(neigh_entry);
1928 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001929}
1930
1931static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001932mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1933 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001934{
Ido Schimmel9665b742017-02-08 11:16:42 +01001935 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001936 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001937 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1938 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001939}
1940
1941static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001942mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001943{
Jiri Pirko33b13412016-11-10 12:31:04 +01001944 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001945
Jiri Pirko33b13412016-11-10 12:31:04 +01001946 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001947 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001948 &key, mlxsw_sp_neigh_ht_params);
1949}
1950
Yotam Gigic723c7352016-07-05 11:27:43 +02001951static void
1952mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1953{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001954 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001955
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001956#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001957 interval = min_t(unsigned long,
1958 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1959 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001960#else
1961 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1962#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001963 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001964}
1965
1966static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1967 char *rauhtd_pl,
1968 int ent_index)
1969{
1970 struct net_device *dev;
1971 struct neighbour *n;
1972 __be32 dipn;
1973 u32 dip;
1974 u16 rif;
1975
1976 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1977
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001978 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001979 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1980 return;
1981 }
1982
1983 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001984 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001985 n = neigh_lookup(&arp_tbl, &dipn, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01001986 if (!n)
Yotam Gigic723c7352016-07-05 11:27:43 +02001987 return;
Yotam Gigic723c7352016-07-05 11:27:43 +02001988
1989 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1990 neigh_event_send(n, NULL);
1991 neigh_release(n);
1992}
1993
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001994#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001995static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1996 char *rauhtd_pl,
1997 int rec_index)
1998{
1999 struct net_device *dev;
2000 struct neighbour *n;
2001 struct in6_addr dip;
2002 u16 rif;
2003
2004 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2005 (char *) &dip);
2006
2007 if (!mlxsw_sp->router->rifs[rif]) {
2008 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2009 return;
2010 }
2011
2012 dev = mlxsw_sp->router->rifs[rif]->dev;
2013 n = neigh_lookup(&nd_tbl, &dip, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002014 if (!n)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002015 return;
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002016
2017 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2018 neigh_event_send(n, NULL);
2019 neigh_release(n);
2020}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002021#else
2022static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2023 char *rauhtd_pl,
2024 int rec_index)
2025{
2026}
2027#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002028
Yotam Gigic723c7352016-07-05 11:27:43 +02002029static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2030 char *rauhtd_pl,
2031 int rec_index)
2032{
2033 u8 num_entries;
2034 int i;
2035
2036 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2037 rec_index);
2038 /* Hardware starts counting at 0, so add 1. */
2039 num_entries++;
2040
2041 /* Each record consists of several neighbour entries. */
2042 for (i = 0; i < num_entries; i++) {
2043 int ent_index;
2044
2045 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2046 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2047 ent_index);
2048 }
2049
2050}
2051
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002052static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2053 char *rauhtd_pl,
2054 int rec_index)
2055{
2056 /* One record contains one entry. */
2057 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2058 rec_index);
2059}
2060
Yotam Gigic723c7352016-07-05 11:27:43 +02002061static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2062 char *rauhtd_pl, int rec_index)
2063{
2064 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2065 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2066 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2067 rec_index);
2068 break;
2069 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002070 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2071 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002072 break;
2073 }
2074}
2075
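/* Check whether a RAUHTD response was completely packed: the dump is
 * re-issued when the maximum number of records was returned and the last
 * record cannot hold any more entries (an IPv6 record always holds a single
 * entry).
 */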
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002076static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2077{
2078 u8 num_rec, last_rec_index, num_entries;
2079
2080 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2081 last_rec_index = num_rec - 1;
2082
2083 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2084 return false;
2085 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2086 MLXSW_REG_RAUHTD_TYPE_IPV6)
2087 return true;
2088
2089 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2090 last_rec_index);
2091 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2092 return true;
2093 return false;
2094}
2095
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002096static int
2097__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2098 char *rauhtd_pl,
2099 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002100{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002101 int i, num_rec;
2102 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002103
2104 /* Make sure the neighbour's netdev isn't removed in the
2105 * process.
2106 */
2107 rtnl_lock();
2108 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002109 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002110 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2111 rauhtd_pl);
2112 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002113 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002114 break;
2115 }
2116 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2117 for (i = 0; i < num_rec; i++)
2118 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2119 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002120 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002121 rtnl_unlock();
2122
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002123 return err;
2124}
2125
2126static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2127{
2128 enum mlxsw_reg_rauhtd_type type;
2129 char *rauhtd_pl;
2130 int err;
2131
2132 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2133 if (!rauhtd_pl)
2134 return -ENOMEM;
2135
2136 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2137 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2138 if (err)
2139 goto out;
2140
2141 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2142 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2143out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002144 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002145 return err;
2146}
2147
2148static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2149{
2150 struct mlxsw_sp_neigh_entry *neigh_entry;
2151
 2152	/* Take RTNL mutex here to prevent the lists from changing */
2153 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002154 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002155 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002156		/* If this neigh has nexthops, make the kernel think this neigh
2157 * is active regardless of the traffic.
2158 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002159 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002160 rtnl_unlock();
2161}
2162
2163static void
2164mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2165{
Ido Schimmel9011b672017-05-16 19:38:25 +02002166 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002167
Ido Schimmel9011b672017-05-16 19:38:25 +02002168 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002169 msecs_to_jiffies(interval));
2170}
2171
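/* Periodic work: dump activity from the device and nudge the kernel about
 * neighbours the device reports as active, as well as about all nexthop
 * neighbours, then re-arm using the configured polling interval.
 */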
2172static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2173{
Ido Schimmel9011b672017-05-16 19:38:25 +02002174 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002175 int err;
2176
Ido Schimmel9011b672017-05-16 19:38:25 +02002177 router = container_of(work, struct mlxsw_sp_router,
2178 neighs_update.dw.work);
2179 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002180 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002181 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002182
Ido Schimmel9011b672017-05-16 19:38:25 +02002183 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002184
Ido Schimmel9011b672017-05-16 19:38:25 +02002185 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002186}
2187
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002188static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2189{
2190 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002191 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002192
Ido Schimmel9011b672017-05-16 19:38:25 +02002193 router = container_of(work, struct mlxsw_sp_router,
2194 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002195	/* Iterate over the nexthop neighbours and probe those that are still
 2196	 * unresolved. This solves a chicken-and-egg problem: a nexthop is not
 2197	 * offloaded until its neighbour is resolved, but the neighbour may
 2198	 * never get resolved if traffic already flows in HW through a
 2199	 * different nexthop.
 2200	 *
 2201	 * Take RTNL mutex here to prevent the lists from changing.
2202 */
2203 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002204 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002205 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002206 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002207 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002208 rtnl_unlock();
2209
Ido Schimmel9011b672017-05-16 19:38:25 +02002210 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002211 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2212}
2213
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002214static void
2215mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2216 struct mlxsw_sp_neigh_entry *neigh_entry,
2217 bool removing);
2218
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002219static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002220{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002221 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2222 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2223}
2224
2225static void
2226mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2227 struct mlxsw_sp_neigh_entry *neigh_entry,
2228 enum mlxsw_reg_rauht_op op)
2229{
Jiri Pirko33b13412016-11-10 12:31:04 +01002230 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002231 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002232 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002233
2234 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2235 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002236 if (neigh_entry->counter_valid)
2237 mlxsw_reg_rauht_pack_counter(rauht_pl,
2238 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002239 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2240}
2241
2242static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002243mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2244 struct mlxsw_sp_neigh_entry *neigh_entry,
2245 enum mlxsw_reg_rauht_op op)
2246{
2247 struct neighbour *n = neigh_entry->key.n;
2248 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2249 const char *dip = n->primary_key;
2250
2251 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2252 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002253 if (neigh_entry->counter_valid)
2254 mlxsw_reg_rauht_pack_counter(rauht_pl,
2255 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002256 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2257}
2258
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002259bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002260{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002261 struct neighbour *n = neigh_entry->key.n;
2262
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002263 /* Packets with a link-local destination address are trapped
2264 * after LPM lookup and never reach the neighbour table, so
2265 * there is no need to program such neighbours to the device.
2266 */
2267 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2268 IPV6_ADDR_LINKLOCAL)
2269 return true;
2270 return false;
2271}
2272
2273static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002274mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2275 struct mlxsw_sp_neigh_entry *neigh_entry,
2276 bool adding)
2277{
2278 if (!adding && !neigh_entry->connected)
2279 return;
2280 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002281 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002282 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2283 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002284 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002285 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002286 return;
2287 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2288 mlxsw_sp_rauht_op(adding));
2289 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002290 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002291 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002292}
2293
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002294void
2295mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2296 struct mlxsw_sp_neigh_entry *neigh_entry,
2297 bool adding)
2298{
2299 if (adding)
2300 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2301 else
2302 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2303 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2304}
2305
Ido Schimmelceb88812017-11-02 17:14:07 +01002306struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002307 struct work_struct work;
2308 struct mlxsw_sp *mlxsw_sp;
2309 struct neighbour *n;
2310};
2311
2312static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2313{
Ido Schimmelceb88812017-11-02 17:14:07 +01002314 struct mlxsw_sp_netevent_work *net_work =
2315 container_of(work, struct mlxsw_sp_netevent_work, work);
2316 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002317 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002318 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002319 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002320 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002321 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002322
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002323 /* If these parameters are changed after we release the lock,
2324 * then we are guaranteed to receive another event letting us
2325 * know about it.
2326 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002327 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002328 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002329 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002330 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002331 read_unlock_bh(&n->lock);
2332
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002333 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01002334 mlxsw_sp_span_respin(mlxsw_sp);
2335
Ido Schimmel93a87e52016-12-23 09:32:49 +01002336 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002337 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2338 if (!entry_connected && !neigh_entry)
2339 goto out;
2340 if (!neigh_entry) {
2341 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2342 if (IS_ERR(neigh_entry))
2343 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002344 }
2345
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002346 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2347 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2348 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2349
2350 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2351 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2352
2353out:
2354 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002355 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002356 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002357}
2358
Ido Schimmel28678f02017-11-02 17:14:10 +01002359static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2360
2361static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2362{
2363 struct mlxsw_sp_netevent_work *net_work =
2364 container_of(work, struct mlxsw_sp_netevent_work, work);
2365 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2366
2367 mlxsw_sp_mp_hash_init(mlxsw_sp);
2368 kfree(net_work);
2369}
2370
2371static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002372 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002373{
Ido Schimmelceb88812017-11-02 17:14:07 +01002374 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002375 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002376 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002377 struct mlxsw_sp *mlxsw_sp;
2378 unsigned long interval;
2379 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002380 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002381 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002382
2383 switch (event) {
2384 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2385 p = ptr;
2386
2387 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002388 if (!p->dev || (p->tbl->family != AF_INET &&
2389 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002390 return NOTIFY_DONE;
2391
2392 /* We are in atomic context and can't take RTNL mutex,
2393 * so use RCU variant to walk the device chain.
2394 */
2395 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2396 if (!mlxsw_sp_port)
2397 return NOTIFY_DONE;
2398
2399 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2400 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002401 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002402
2403 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2404 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002405 case NETEVENT_NEIGH_UPDATE:
2406 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002407
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002408 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002409 return NOTIFY_DONE;
2410
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002411 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002412 if (!mlxsw_sp_port)
2413 return NOTIFY_DONE;
2414
Ido Schimmelceb88812017-11-02 17:14:07 +01002415 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2416 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002417 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002418 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002419 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002420
Ido Schimmelceb88812017-11-02 17:14:07 +01002421 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2422 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2423 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002424
2425 /* Take a reference to ensure the neighbour won't be
2426 * destructed until we drop the reference in delayed
2427 * work.
2428 */
2429 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002430 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002431 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002432 break;
David Ahern3192dac2018-03-02 08:32:16 -08002433 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
David Ahern5e18b9c552018-03-02 08:32:19 -08002434 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
Ido Schimmel28678f02017-11-02 17:14:10 +01002435 net = ptr;
2436
2437 if (!net_eq(net, &init_net))
2438 return NOTIFY_DONE;
2439
2440 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2441 if (!net_work)
2442 return NOTIFY_BAD;
2443
2444 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2445 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2446 net_work->mlxsw_sp = router->mlxsw_sp;
2447 mlxsw_core_schedule_work(&net_work->work);
2448 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002449 }
2450
2451 return NOTIFY_DONE;
2452}
2453
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002454static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2455{
Yotam Gigic723c7352016-07-05 11:27:43 +02002456 int err;
2457
Ido Schimmel9011b672017-05-16 19:38:25 +02002458 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002459 &mlxsw_sp_neigh_ht_params);
2460 if (err)
2461 return err;
2462
2463 /* Initialize the polling interval according to the default
2464 * table.
2465 */
2466 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2467
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002468	/* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002469 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002470 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002471 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002472 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002473 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2474 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002475 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002476}
2477
2478static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2479{
Ido Schimmel9011b672017-05-16 19:38:25 +02002480 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2481 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2482 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002483}
2484
Ido Schimmel9665b742017-02-08 11:16:42 +01002485static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002486 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002487{
2488 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2489
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002490 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002491 rif_list_node) {
2492 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002493 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002494 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002495}
2496
Petr Machata35225e42017-09-02 23:49:22 +02002497enum mlxsw_sp_nexthop_type {
2498 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002499 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002500};
2501
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002502struct mlxsw_sp_nexthop_key {
2503 struct fib_nh *fib_nh;
2504};
2505
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002506struct mlxsw_sp_nexthop {
2507 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002508 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002509 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002510 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2511 * this belongs to
2512 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002513 struct rhash_head ht_node;
2514 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002515 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002516 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002517 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002518 int norm_nh_weight;
2519 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002520 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002521 u8 should_offload:1, /* set indicates this neigh is connected and
2522 * should be put to KVD linear area of this group.
2523 */
2524 offloaded:1, /* set in case the neigh is actually put into
2525 * KVD linear area of this group.
2526 */
2527 update:1; /* set indicates that MAC of this neigh should be
2528 * updated in HW
2529 */
Petr Machata35225e42017-09-02 23:49:22 +02002530 enum mlxsw_sp_nexthop_type type;
2531 union {
2532 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002533 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002534 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002535 unsigned int counter_index;
2536 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002537};
2538
2539struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002540 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002541 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002542 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002543 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002544 u8 adj_index_valid:1,
2545 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002546 u32 adj_index;
2547 u16 ecmp_size;
2548 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002549 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002550 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002551#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002552};
2553
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002554void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2555 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002556{
2557 struct devlink *devlink;
2558
2559 devlink = priv_to_devlink(mlxsw_sp->core);
2560 if (!devlink_dpipe_table_counter_enabled(devlink,
2561 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2562 return;
2563
2564 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2565 return;
2566
2567 nh->counter_valid = true;
2568}
2569
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002570void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2571 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002572{
2573 if (!nh->counter_valid)
2574 return;
2575 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2576 nh->counter_valid = false;
2577}
2578
2579int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2580 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2581{
2582 if (!nh->counter_valid)
2583 return -EINVAL;
2584
2585 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2586 p_counter, NULL);
2587}
2588
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002589struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2590 struct mlxsw_sp_nexthop *nh)
2591{
2592 if (!nh) {
2593 if (list_empty(&router->nexthop_list))
2594 return NULL;
2595 else
2596 return list_first_entry(&router->nexthop_list,
2597 typeof(*nh), router_list_node);
2598 }
2599 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2600 return NULL;
2601 return list_next_entry(nh, router_list_node);
2602}
2603
2604bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2605{
2606 return nh->offloaded;
2607}
2608
2609unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2610{
2611 if (!nh->offloaded)
2612 return NULL;
2613 return nh->neigh_entry->ha;
2614}
2615
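/* Report where a next hop sits in the adjacency table: the group's base
 * adjacency index and size, and the offset of this next hop within the group.
 * A next hop can span several adjacency entries when weights are in use.
 */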
2616int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002617 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002618{
2619 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2620 u32 adj_hash_index = 0;
2621 int i;
2622
2623 if (!nh->offloaded || !nh_grp->adj_index_valid)
2624 return -EINVAL;
2625
2626 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002627 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002628
2629 for (i = 0; i < nh_grp->count; i++) {
2630 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2631
2632 if (nh_iter == nh)
2633 break;
2634 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002635 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002636 }
2637
2638 *p_adj_hash_index = adj_hash_index;
2639 return 0;
2640}
2641
2642struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2643{
2644 return nh->rif;
2645}
2646
2647bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2648{
2649 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2650 int i;
2651
2652 for (i = 0; i < nh_grp->count; i++) {
2653 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2654
2655 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2656 return true;
2657 }
2658 return false;
2659}
2660
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002661static struct fib_info *
2662mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2663{
2664 return nh_grp->priv;
2665}
2666
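/* Nexthop groups are kept in a hash table so that FIB entries with the same
 * set of next hops share one group. IPv4 groups are keyed by their fib_info,
 * whereas IPv6 groups have no such shared object and are compared next hop by
 * next hop (gateway, ifindex and weight).
 */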
2667struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002668 enum mlxsw_sp_l3proto proto;
2669 union {
2670 struct fib_info *fi;
2671 struct mlxsw_sp_fib6_entry *fib6_entry;
2672 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002673};
2674
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002675static bool
2676mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002677 const struct in6_addr *gw, int ifindex,
2678 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002679{
2680 int i;
2681
2682 for (i = 0; i < nh_grp->count; i++) {
2683 const struct mlxsw_sp_nexthop *nh;
2684
2685 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002686 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002687 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2688 return true;
2689 }
2690
2691 return false;
2692}
2693
2694static bool
2695mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2696 const struct mlxsw_sp_fib6_entry *fib6_entry)
2697{
2698 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2699
2700 if (nh_grp->count != fib6_entry->nrt6)
2701 return false;
2702
2703 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2704 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002705 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002706
2707 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
Ido Schimmel3743d882018-01-12 17:15:59 +01002708 weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002709 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
Ido Schimmel3743d882018-01-12 17:15:59 +01002710 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2711 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002712 return false;
2713 }
2714
2715 return true;
2716}
2717
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002718static int
2719mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2720{
2721 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2722 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2723
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002724 switch (cmp_arg->proto) {
2725 case MLXSW_SP_L3_PROTO_IPV4:
2726 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2727 case MLXSW_SP_L3_PROTO_IPV6:
2728 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2729 cmp_arg->fib6_entry);
2730 default:
2731 WARN_ON(1);
2732 return 1;
2733 }
2734}
2735
2736static int
2737mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2738{
2739 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002740}
2741
2742static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2743{
2744 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002745 const struct mlxsw_sp_nexthop *nh;
2746 struct fib_info *fi;
2747 unsigned int val;
2748 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002749
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002750 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2751 case AF_INET:
2752 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2753 return jhash(&fi, sizeof(fi), seed);
2754 case AF_INET6:
2755 val = nh_grp->count;
2756 for (i = 0; i < nh_grp->count; i++) {
2757 nh = &nh_grp->nexthops[i];
2758 val ^= nh->ifindex;
2759 }
2760 return jhash(&val, sizeof(val), seed);
2761 default:
2762 WARN_ON(1);
2763 return 0;
2764 }
2765}
2766
2767static u32
2768mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2769{
2770 unsigned int val = fib6_entry->nrt6;
2771 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2772 struct net_device *dev;
2773
2774 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2775 dev = mlxsw_sp_rt6->rt->dst.dev;
2776 val ^= dev->ifindex;
2777 }
2778
2779 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002780}
2781
2782static u32
2783mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2784{
2785 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2786
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002787 switch (cmp_arg->proto) {
2788 case MLXSW_SP_L3_PROTO_IPV4:
2789 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2790 case MLXSW_SP_L3_PROTO_IPV6:
2791 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2792 default:
2793 WARN_ON(1);
2794 return 0;
2795 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002796}
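
/* Keying summary (derived from the code above): IPv4 groups are keyed by
 * their struct fib_info pointer, so routes sharing a fib_info share one
 * nexthop group. IPv6 groups have no such shared object and are keyed by
 * content: a group matches a fib6 entry when the counts agree and every
 * rt6 (gateway, ifindex, weight) has a corresponding nexthop. The object
 * hash mirrors this; e.g. an entry with two rt6s via ifindexes 3 and 5
 * hashes jhash() of the value 2 ^ 3 ^ 5 == 4 with the given seed.
 */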
2797
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002798static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002799 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002800 .hashfn = mlxsw_sp_nexthop_group_hash,
2801 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2802 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002803};
2804
2805static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2806 struct mlxsw_sp_nexthop_group *nh_grp)
2807{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002808 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2809 !nh_grp->gateway)
2810 return 0;
2811
Ido Schimmel9011b672017-05-16 19:38:25 +02002812 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002813 &nh_grp->ht_node,
2814 mlxsw_sp_nexthop_group_ht_params);
2815}
2816
2817static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2818 struct mlxsw_sp_nexthop_group *nh_grp)
2819{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002820 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2821 !nh_grp->gateway)
2822 return;
2823
Ido Schimmel9011b672017-05-16 19:38:25 +02002824 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002825 &nh_grp->ht_node,
2826 mlxsw_sp_nexthop_group_ht_params);
2827}
2828
2829static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002830mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2831 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002832{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002833 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2834
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002835 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002836 cmp_arg.fi = fi;
2837 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2838 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002839 mlxsw_sp_nexthop_group_ht_params);
2840}
2841
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002842static struct mlxsw_sp_nexthop_group *
2843mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2844 struct mlxsw_sp_fib6_entry *fib6_entry)
2845{
2846 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2847
2848 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2849 cmp_arg.fib6_entry = fib6_entry;
2850 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2851 &cmp_arg,
2852 mlxsw_sp_nexthop_group_ht_params);
2853}
2854
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002855static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2856 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2857 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2858 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2859};
2860
2861static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2862 struct mlxsw_sp_nexthop *nh)
2863{
Ido Schimmel9011b672017-05-16 19:38:25 +02002864 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002865 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2866}
2867
2868static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2869 struct mlxsw_sp_nexthop *nh)
2870{
Ido Schimmel9011b672017-05-16 19:38:25 +02002871 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002872 mlxsw_sp_nexthop_ht_params);
2873}
2874
Ido Schimmelad178c82017-02-08 11:16:40 +01002875static struct mlxsw_sp_nexthop *
2876mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2877 struct mlxsw_sp_nexthop_key key)
2878{
Ido Schimmel9011b672017-05-16 19:38:25 +02002879 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002880 mlxsw_sp_nexthop_ht_params);
2881}
2882
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002883static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002884 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002885 u32 adj_index, u16 ecmp_size,
2886 u32 new_adj_index,
2887 u16 new_ecmp_size)
2888{
2889 char raleu_pl[MLXSW_REG_RALEU_LEN];
2890
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002891 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002892 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2893 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002894 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002895 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2896}
2897
2898static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2899 struct mlxsw_sp_nexthop_group *nh_grp,
2900 u32 old_adj_index, u16 old_ecmp_size)
2901{
2902 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002903 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002904 int err;
2905
2906 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002907 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002908 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002909 fib = fib_entry->fib_node->fib;
2910 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002911 old_adj_index,
2912 old_ecmp_size,
2913 nh_grp->adj_index,
2914 nh_grp->ecmp_size);
2915 if (err)
2916 return err;
2917 }
2918 return 0;
2919}
2920
Ido Schimmeleb789982017-10-22 23:11:48 +02002921static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2922 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002923{
2924 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2925 char ratr_pl[MLXSW_REG_RATR_LEN];
2926
2927 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002928 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2929 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002930 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002931 if (nh->counter_valid)
2932 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2933 else
2934 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2935
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002936 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2937}
2938
Ido Schimmeleb789982017-10-22 23:11:48 +02002939int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2940 struct mlxsw_sp_nexthop *nh)
2941{
2942 int i;
2943
2944 for (i = 0; i < nh->num_adj_entries; i++) {
2945 int err;
2946
2947 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2948 if (err)
2949 return err;
2950 }
2951
2952 return 0;
2953}
2954
2955static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2956 u32 adj_index,
2957 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002958{
2959 const struct mlxsw_sp_ipip_ops *ipip_ops;
2960
2961 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2962 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2963}
2964
Ido Schimmeleb789982017-10-22 23:11:48 +02002965static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2966 u32 adj_index,
2967 struct mlxsw_sp_nexthop *nh)
2968{
2969 int i;
2970
2971 for (i = 0; i < nh->num_adj_entries; i++) {
2972 int err;
2973
2974 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2975 nh);
2976 if (err)
2977 return err;
2978 }
2979
2980 return 0;
2981}
2982
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002983static int
Petr Machata35225e42017-09-02 23:49:22 +02002984mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2985 struct mlxsw_sp_nexthop_group *nh_grp,
2986 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002987{
2988 u32 adj_index = nh_grp->adj_index; /* base */
2989 struct mlxsw_sp_nexthop *nh;
2990 int i;
2991 int err;
2992
2993 for (i = 0; i < nh_grp->count; i++) {
2994 nh = &nh_grp->nexthops[i];
2995
2996 if (!nh->should_offload) {
2997 nh->offloaded = 0;
2998 continue;
2999 }
3000
Ido Schimmela59b7e02017-01-23 11:11:42 +01003001 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02003002 switch (nh->type) {
3003 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003004 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003005 (mlxsw_sp, adj_index, nh);
3006 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003007 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3008 err = mlxsw_sp_nexthop_ipip_update
3009 (mlxsw_sp, adj_index, nh);
3010 break;
Petr Machata35225e42017-09-02 23:49:22 +02003011 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003012 if (err)
3013 return err;
3014 nh->update = 0;
3015 nh->offloaded = 1;
3016 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003017 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003018 }
3019 return 0;
3020}
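
/* Note on the update loop above: each nexthop's adjacency entries are
 * written starting at the group's base index and the cursor advances by
 * nh->num_adj_entries, so a heavier-weighted nexthop simply occupies a
 * longer run of consecutive entries; nexthops that should not be
 * offloaded are skipped and their offloaded flag is cleared.
 */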
3021
Ido Schimmel1819ae32017-07-21 18:04:28 +02003022static bool
3023mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3024 const struct mlxsw_sp_fib_entry *fib_entry);
3025
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003026static int
3027mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3028 struct mlxsw_sp_nexthop_group *nh_grp)
3029{
3030 struct mlxsw_sp_fib_entry *fib_entry;
3031 int err;
3032
3033 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02003034 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3035 fib_entry))
3036 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003037 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3038 if (err)
3039 return err;
3040 }
3041 return 0;
3042}
3043
3044static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003045mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3046 enum mlxsw_reg_ralue_op op, int err);
3047
3048static void
3049mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3050{
3051 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3052 struct mlxsw_sp_fib_entry *fib_entry;
3053
3054 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3055 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3056 fib_entry))
3057 continue;
3058 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3059 }
3060}
3061
Ido Schimmel425a08c2017-10-22 23:11:47 +02003062static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3063{
3064 /* Valid sizes for an adjacency group are:
3065 * 1-64, 512, 1024, 2048 and 4096.
3066 */
3067 if (*p_adj_grp_size <= 64)
3068 return;
3069 else if (*p_adj_grp_size <= 512)
3070 *p_adj_grp_size = 512;
3071 else if (*p_adj_grp_size <= 1024)
3072 *p_adj_grp_size = 1024;
3073 else if (*p_adj_grp_size <= 2048)
3074 *p_adj_grp_size = 2048;
3075 else
3076 *p_adj_grp_size = 4096;
3077}
3078
3079static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3080 unsigned int alloc_size)
3081{
3082 if (alloc_size >= 4096)
3083 *p_adj_grp_size = 4096;
3084 else if (alloc_size >= 2048)
3085 *p_adj_grp_size = 2048;
3086 else if (alloc_size >= 1024)
3087 *p_adj_grp_size = 1024;
3088 else if (alloc_size >= 512)
3089 *p_adj_grp_size = 512;
3090}
3091
3092static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3093 u16 *p_adj_grp_size)
3094{
3095 unsigned int alloc_size;
3096 int err;
3097
3098 /* Round up the requested group size to the next size supported
3099 * by the device and make sure the request can be satisfied.
3100 */
3101 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3102 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3103 &alloc_size);
3104 if (err)
3105 return err;
3106 /* It is possible the allocation results in more allocated
3107		 * entries than requested. Try to use as many of them as
3108 * possible.
3109 */
3110 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3111
3112 return 0;
3113}
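
/* Worked example (hypothetical sizes): a requested group size of 70 is
 * first rounded up to 512, the next size the device supports. The KVD
 * linear allocator is then queried; if it reports that 1024 contiguous
 * entries are actually available, the group is grown to 1024 to use as
 * many of them as possible, whereas a report of, say, 600 entries would
 * leave the group at 512.
 */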
3114
Ido Schimmel77d964e2017-08-02 09:56:05 +02003115static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003116mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3117{
3118 int i, g = 0, sum_norm_weight = 0;
3119 struct mlxsw_sp_nexthop *nh;
3120
3121 for (i = 0; i < nh_grp->count; i++) {
3122 nh = &nh_grp->nexthops[i];
3123
3124 if (!nh->should_offload)
3125 continue;
3126 if (g > 0)
3127 g = gcd(nh->nh_weight, g);
3128 else
3129 g = nh->nh_weight;
3130 }
3131
3132 for (i = 0; i < nh_grp->count; i++) {
3133 nh = &nh_grp->nexthops[i];
3134
3135 if (!nh->should_offload)
3136 continue;
3137 nh->norm_nh_weight = nh->nh_weight / g;
3138 sum_norm_weight += nh->norm_nh_weight;
3139 }
3140
3141 nh_grp->sum_norm_weight = sum_norm_weight;
3142}
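
/* Worked example: offloadable nexthops with weights 20, 30 and 50 have a
 * gcd of 10, so their normalized weights become 2, 3 and 5 and
 * sum_norm_weight is 10. Nexthops that cannot be offloaded are skipped
 * and contribute nothing to the sum.
 */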
3143
3144static void
3145mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3146{
3147 int total = nh_grp->sum_norm_weight;
3148 u16 ecmp_size = nh_grp->ecmp_size;
3149 int i, weight = 0, lower_bound = 0;
3150
3151 for (i = 0; i < nh_grp->count; i++) {
3152 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3153 int upper_bound;
3154
3155 if (!nh->should_offload)
3156 continue;
3157 weight += nh->norm_nh_weight;
3158 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3159 nh->num_adj_entries = upper_bound - lower_bound;
3160 lower_bound = upper_bound;
3161 }
3162}
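
/* Worked example (continuing the 2:3:5 case above): with ecmp_size equal
 * to the normalized sum of 10, the three nexthops receive exactly 2, 3
 * and 5 adjacency entries. When mlxsw_sp_fix_adj_grp_size() has rounded
 * ecmp_size up (e.g. a normalized sum of 70 becomes 512), the
 * DIV_ROUND_CLOSEST() bounds spread the extra entries so that each
 * nexthop's share stays proportional to its weight.
 */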
3163
3164static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003165mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3166 struct mlxsw_sp_nexthop_group *nh_grp)
3167{
Ido Schimmeleb789982017-10-22 23:11:48 +02003168 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003169 struct mlxsw_sp_nexthop *nh;
3170 bool offload_change = false;
3171 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003172 bool old_adj_index_valid;
3173 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003174 int i;
3175 int err;
3176
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003177 if (!nh_grp->gateway) {
3178 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3179 return;
3180 }
3181
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003182 for (i = 0; i < nh_grp->count; i++) {
3183 nh = &nh_grp->nexthops[i];
3184
Petr Machata56b8a9e2017-07-31 09:27:29 +02003185 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003186 offload_change = true;
3187 if (nh->should_offload)
3188 nh->update = 1;
3189 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003190 }
3191 if (!offload_change) {
3192 /* Nothing was added or removed, so no need to reallocate. Just
3193 * update MAC on existing adjacency indexes.
3194 */
Petr Machata35225e42017-09-02 23:49:22 +02003195 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003196 if (err) {
3197 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3198 goto set_trap;
3199 }
3200 return;
3201 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003202 mlxsw_sp_nexthop_group_normalize(nh_grp);
3203 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003204 /* No neigh of this group is connected so we just set
3205		 * the trap and let everything flow through the kernel.
3206 */
3207 goto set_trap;
3208
Ido Schimmeleb789982017-10-22 23:11:48 +02003209 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003210 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3211 if (err)
3212 /* No valid allocation size available. */
3213 goto set_trap;
3214
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003215 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3216 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003217 /* We ran out of KVD linear space, just set the
3218		 * trap and let everything flow through the kernel.
3219 */
3220 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3221 goto set_trap;
3222 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003223 old_adj_index_valid = nh_grp->adj_index_valid;
3224 old_adj_index = nh_grp->adj_index;
3225 old_ecmp_size = nh_grp->ecmp_size;
3226 nh_grp->adj_index_valid = 1;
3227 nh_grp->adj_index = adj_index;
3228 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003229 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003230 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003231 if (err) {
3232 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3233 goto set_trap;
3234 }
3235
3236 if (!old_adj_index_valid) {
3237 /* The trap was set for fib entries, so we have to call
3238 * fib entry update to unset it and use adjacency index.
3239 */
3240 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3241 if (err) {
3242 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3243 goto set_trap;
3244 }
3245 return;
3246 }
3247
3248 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3249 old_adj_index, old_ecmp_size);
3250 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3251 if (err) {
3252 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3253 goto set_trap;
3254 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003255
3256 /* Offload state within the group changed, so update the flags. */
3257 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3258
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003259 return;
3260
3261set_trap:
3262 old_adj_index_valid = nh_grp->adj_index_valid;
3263 nh_grp->adj_index_valid = 0;
3264 for (i = 0; i < nh_grp->count; i++) {
3265 nh = &nh_grp->nexthops[i];
3266 nh->offloaded = 0;
3267 }
3268 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3269 if (err)
3270 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3271 if (old_adj_index_valid)
3272 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3273}
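
/* Rough summary of the refresh flow above: if only already-offloaded
 * neighbours changed (e.g. a new MAC), the existing adjacency entries
 * are rewritten in place. Otherwise the group is renormalized, a new
 * adjacency block of the fixed-up ECMP size is allocated from KVD linear
 * memory, the entries are written, the FIB entries are switched over (or
 * mass-updated to the new index) and the old block is freed. Any failure
 * falls back to trapping the group's traffic to the CPU.
 */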
3274
3275static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3276 bool removing)
3277{
Petr Machata213666a2017-07-31 09:27:30 +02003278 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003279 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003280 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003281 nh->should_offload = 0;
3282 nh->update = 1;
3283}
3284
3285static void
3286mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3287 struct mlxsw_sp_neigh_entry *neigh_entry,
3288 bool removing)
3289{
3290 struct mlxsw_sp_nexthop *nh;
3291
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003292 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3293 neigh_list_node) {
3294 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3295 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3296 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003297}
3298
Ido Schimmel9665b742017-02-08 11:16:42 +01003299static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003300 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003301{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003302 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003303 return;
3304
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003305 nh->rif = rif;
3306 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003307}
3308
3309static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3310{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003311 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003312 return;
3313
3314 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003315 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003316}
3317
Ido Schimmela8c97012017-02-08 11:16:35 +01003318static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3319 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003320{
3321 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003322 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003323 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003324 int err;
3325
Ido Schimmelad178c82017-02-08 11:16:40 +01003326 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003327 return 0;
3328
Jiri Pirko33b13412016-11-10 12:31:04 +01003329	/* Take a reference on the neigh here to ensure that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003330	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003331 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003332 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003333 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003334 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003335 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003336 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3337 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003338 if (IS_ERR(n))
3339 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003340 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003341 }
3342 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3343 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003344 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3345 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003346 err = -EINVAL;
3347 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003348 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003349 }
Yotam Gigib2157142016-07-05 11:27:51 +02003350
3351 /* If that is the first nexthop connected to that neigh, add to
3352 * nexthop_neighs_list
3353 */
3354 if (list_empty(&neigh_entry->nexthop_list))
3355 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003356 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003357
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003358 nh->neigh_entry = neigh_entry;
3359 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3360 read_lock_bh(&n->lock);
3361 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003362 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003363 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003364 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003365
3366 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003367
3368err_neigh_entry_create:
3369 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003370 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003371}
3372
Ido Schimmela8c97012017-02-08 11:16:35 +01003373static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3374 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003375{
3376 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003377 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003378
Ido Schimmelb8399a12017-02-08 11:16:33 +01003379 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003380 return;
3381 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003382
Ido Schimmel58312122016-12-23 09:32:50 +01003383 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003384 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003385 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003386
3387 /* If that is the last nexthop connected to that neigh, remove from
3388 * nexthop_neighs_list
3389 */
Ido Schimmele58be792017-02-08 11:16:28 +01003390 if (list_empty(&neigh_entry->nexthop_list))
3391 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003392
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003393 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3394 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3395
3396 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003397}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003398
Petr Machata44b0fff2017-11-03 10:03:44 +01003399static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3400{
3401 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3402
3403 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3404}
3405
Petr Machatad97cda52017-11-28 13:17:13 +01003406static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3407 struct mlxsw_sp_nexthop *nh,
3408 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003409{
Petr Machata44b0fff2017-11-03 10:03:44 +01003410 bool removing;
3411
Petr Machata1012b9a2017-09-02 23:49:23 +02003412 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003413 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003414
Petr Machatad97cda52017-11-28 13:17:13 +01003415 nh->ipip_entry = ipip_entry;
3416 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003417 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003418 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003419}
3420
3421static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3422 struct mlxsw_sp_nexthop *nh)
3423{
3424 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3425
3426 if (!ipip_entry)
3427 return;
3428
3429 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003430 nh->ipip_entry = NULL;
3431}
3432
3433static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3434 const struct fib_nh *fib_nh,
3435 enum mlxsw_sp_ipip_type *p_ipipt)
3436{
3437 struct net_device *dev = fib_nh->nh_dev;
3438
3439 return dev &&
3440 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3441 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3442}
3443
Petr Machata35225e42017-09-02 23:49:22 +02003444static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3445 struct mlxsw_sp_nexthop *nh)
3446{
3447 switch (nh->type) {
3448 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3449 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3450 mlxsw_sp_nexthop_rif_fini(nh);
3451 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003452 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003453 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003454 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3455 break;
Petr Machata35225e42017-09-02 23:49:22 +02003456 }
3457}
3458
3459static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3460 struct mlxsw_sp_nexthop *nh,
3461 struct fib_nh *fib_nh)
3462{
Petr Machatad97cda52017-11-28 13:17:13 +01003463 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003464 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003465 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003466 struct mlxsw_sp_rif *rif;
3467 int err;
3468
Petr Machatad97cda52017-11-28 13:17:13 +01003469 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3470 if (ipip_entry) {
3471 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3472 if (ipip_ops->can_offload(mlxsw_sp, dev,
3473 MLXSW_SP_L3_PROTO_IPV4)) {
3474 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3475 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3476 return 0;
3477 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003478 }
3479
Petr Machata35225e42017-09-02 23:49:22 +02003480 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3481 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3482 if (!rif)
3483 return 0;
3484
3485 mlxsw_sp_nexthop_rif_init(nh, rif);
3486 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3487 if (err)
3488 goto err_neigh_init;
3489
3490 return 0;
3491
3492err_neigh_init:
3493 mlxsw_sp_nexthop_rif_fini(nh);
3494 return err;
3495}
3496
3497static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3498 struct mlxsw_sp_nexthop *nh)
3499{
3500 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3501}
3502
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003503static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3504 struct mlxsw_sp_nexthop_group *nh_grp,
3505 struct mlxsw_sp_nexthop *nh,
3506 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003507{
3508 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003509 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003510 int err;
3511
3512 nh->nh_grp = nh_grp;
3513 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003514#ifdef CONFIG_IP_ROUTE_MULTIPATH
3515 nh->nh_weight = fib_nh->nh_weight;
3516#else
3517 nh->nh_weight = 1;
3518#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003519 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003520 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3521 if (err)
3522 return err;
3523
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003524 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003525 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3526
Ido Schimmel97989ee2017-03-10 08:53:38 +01003527 if (!dev)
3528 return 0;
3529
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003530 in_dev = __in_dev_get_rtnl(dev);
3531 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3532 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3533 return 0;
3534
Petr Machata35225e42017-09-02 23:49:22 +02003535 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003536 if (err)
3537 goto err_nexthop_neigh_init;
3538
3539 return 0;
3540
3541err_nexthop_neigh_init:
3542 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3543 return err;
3544}
3545
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003546static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3547 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003548{
Petr Machata35225e42017-09-02 23:49:22 +02003549 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003550 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003551 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003552 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003553}
3554
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003555static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3556 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003557{
3558 struct mlxsw_sp_nexthop_key key;
3559 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003560
Ido Schimmel9011b672017-05-16 19:38:25 +02003561 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003562 return;
3563
3564 key.fib_nh = fib_nh;
3565 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3566 if (WARN_ON_ONCE(!nh))
3567 return;
3568
Ido Schimmelad178c82017-02-08 11:16:40 +01003569 switch (event) {
3570 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003571 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003572 break;
3573 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003574 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003575 break;
3576 }
3577
3578 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3579}
3580
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003581static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3582 struct mlxsw_sp_rif *rif)
3583{
3584 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003585 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003586
3587 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003588 switch (nh->type) {
3589 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3590 removing = false;
3591 break;
3592 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3593 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3594 break;
3595 default:
3596 WARN_ON(1);
3597 continue;
3598 }
3599
3600 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003601 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3602 }
3603}
3604
Petr Machata09dbf622017-11-28 13:17:14 +01003605static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3606 struct mlxsw_sp_rif *old_rif,
3607 struct mlxsw_sp_rif *new_rif)
3608{
3609 struct mlxsw_sp_nexthop *nh;
3610
3611 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3612 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3613 nh->rif = new_rif;
3614 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3615}
3616
Ido Schimmel9665b742017-02-08 11:16:42 +01003617static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003618 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003619{
3620 struct mlxsw_sp_nexthop *nh, *tmp;
3621
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003622 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003623 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003624 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3625 }
3626}
3627
Petr Machata9b014512017-09-02 23:49:20 +02003628static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3629 const struct fib_info *fi)
3630{
Petr Machata1012b9a2017-09-02 23:49:23 +02003631 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3632 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003633}
3634
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003635static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003636mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003637{
3638 struct mlxsw_sp_nexthop_group *nh_grp;
3639 struct mlxsw_sp_nexthop *nh;
3640 struct fib_nh *fib_nh;
3641 size_t alloc_size;
3642 int i;
3643 int err;
3644
3645 alloc_size = sizeof(*nh_grp) +
3646 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3647 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3648 if (!nh_grp)
3649 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003650 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003651 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003652 nh_grp->neigh_tbl = &arp_tbl;
3653
Petr Machata9b014512017-09-02 23:49:20 +02003654 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003655 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003656 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003657 for (i = 0; i < nh_grp->count; i++) {
3658 nh = &nh_grp->nexthops[i];
3659 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003660 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003661 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003662 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003663 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003664 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3665 if (err)
3666 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003667 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3668 return nh_grp;
3669
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003670err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003671err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003672 for (i--; i >= 0; i--) {
3673 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003674 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003675 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003676 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003677 kfree(nh_grp);
3678 return ERR_PTR(err);
3679}
3680
3681static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003682mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3683 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003684{
3685 struct mlxsw_sp_nexthop *nh;
3686 int i;
3687
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003688 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003689 for (i = 0; i < nh_grp->count; i++) {
3690 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003691 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003692 }
Ido Schimmel58312122016-12-23 09:32:50 +01003693 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3694 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003695 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003696 kfree(nh_grp);
3697}
3698
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003699static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3700 struct mlxsw_sp_fib_entry *fib_entry,
3701 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003702{
3703 struct mlxsw_sp_nexthop_group *nh_grp;
3704
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003705 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003706 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003707 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003708 if (IS_ERR(nh_grp))
3709 return PTR_ERR(nh_grp);
3710 }
3711 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3712 fib_entry->nh_group = nh_grp;
3713 return 0;
3714}
3715
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003716static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3717 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003718{
3719 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3720
3721 list_del(&fib_entry->nexthop_group_node);
3722 if (!list_empty(&nh_grp->fib_list))
3723 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003724 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003725}
3726
Ido Schimmel013b20f2017-02-08 11:16:36 +01003727static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003728mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3729{
3730 struct mlxsw_sp_fib4_entry *fib4_entry;
3731
3732 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3733 common);
3734 return !fib4_entry->tos;
3735}
3736
3737static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003738mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3739{
3740 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3741
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003742 switch (fib_entry->fib_node->fib->proto) {
3743 case MLXSW_SP_L3_PROTO_IPV4:
3744 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3745 return false;
3746 break;
3747 case MLXSW_SP_L3_PROTO_IPV6:
3748 break;
3749 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003750
Ido Schimmel013b20f2017-02-08 11:16:36 +01003751 switch (fib_entry->type) {
3752 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3753 return !!nh_group->adj_index_valid;
3754 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003755 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003756 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3757 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003758 default:
3759 return false;
3760 }
3761}
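
/* Summary (derived from the checks above): a FIB entry is considered
 * offloadable when it is a remote entry whose group holds a valid
 * adjacency index, a local entry that has a RIF, or an IP-in-IP decap
 * entry; IPv4 entries with a non-zero TOS are never offloaded.
 */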
3762
Ido Schimmel428b8512017-08-03 13:28:28 +02003763static struct mlxsw_sp_nexthop *
3764mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3765 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3766{
3767 int i;
3768
3769 for (i = 0; i < nh_grp->count; i++) {
3770 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3771 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3772
3773 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3774 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3775 &rt->rt6i_gateway))
3776 return nh;
3778 }
3779
3780 return NULL;
3781}
3782
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003783static void
3784mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3785{
3786 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3787 int i;
3788
Petr Machata4607f6d2017-09-02 23:49:25 +02003789 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3790 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003791 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3792 return;
3793 }
3794
3795 for (i = 0; i < nh_grp->count; i++) {
3796 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3797
3798 if (nh->offloaded)
3799 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3800 else
3801 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3802 }
3803}
3804
3805static void
3806mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3807{
3808 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3809 int i;
3810
Ido Schimmeld1c95af2018-02-17 00:30:44 +01003811 if (!list_is_singular(&nh_grp->fib_list))
3812 return;
3813
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003814 for (i = 0; i < nh_grp->count; i++) {
3815 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3816
3817 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3818 }
3819}
3820
Ido Schimmel428b8512017-08-03 13:28:28 +02003821static void
3822mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3823{
3824 struct mlxsw_sp_fib6_entry *fib6_entry;
3825 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3826
3827 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3828 common);
3829
3830 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3831 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003832 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003833 return;
3834 }
3835
3836 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3837 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3838 struct mlxsw_sp_nexthop *nh;
3839
3840 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3841 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003842 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003843 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003844 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003845 }
3846}
3847
3848static void
3849mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3850{
3851 struct mlxsw_sp_fib6_entry *fib6_entry;
3852 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3853
3854 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3855 common);
3856 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3857 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3858
Ido Schimmelfe400792017-08-15 09:09:49 +02003859 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003860 }
3861}
3862
Ido Schimmel013b20f2017-02-08 11:16:36 +01003863static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3864{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003865 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003866 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003867 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003868 break;
3869 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003870 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3871 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003872 }
3873}
3874
3875static void
3876mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3877{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003878 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003879 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003880 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003881 break;
3882 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003883 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3884 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003885 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003886}
3887
3888static void
3889mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3890 enum mlxsw_reg_ralue_op op, int err)
3891{
3892 switch (op) {
3893 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003894 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3895 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3896 if (err)
3897 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003898 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003899 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003900 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003901 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3902 return;
3903 default:
3904 return;
3905 }
3906}
3907
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003908static void
3909mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3910 const struct mlxsw_sp_fib_entry *fib_entry,
3911 enum mlxsw_reg_ralue_op op)
3912{
3913 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3914 enum mlxsw_reg_ralxx_protocol proto;
3915 u32 *p_dip;
3916
3917 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3918
3919 switch (fib->proto) {
3920 case MLXSW_SP_L3_PROTO_IPV4:
3921 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3922 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3923 fib_entry->fib_node->key.prefix_len,
3924 *p_dip);
3925 break;
3926 case MLXSW_SP_L3_PROTO_IPV6:
3927 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3928 fib_entry->fib_node->key.prefix_len,
3929 fib_entry->fib_node->key.addr);
3930 break;
3931 }
3932}
3933
3934static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3935 struct mlxsw_sp_fib_entry *fib_entry,
3936 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003937{
3938 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003939 enum mlxsw_reg_ralue_trap_action trap_action;
3940 u16 trap_id = 0;
3941 u32 adjacency_index = 0;
3942 u16 ecmp_size = 0;
3943
3944 /* In case the nexthop group adjacency index is valid, use it
3945	 * with the provided ECMP size. Otherwise, set up a trap and pass
3946	 * traffic to the kernel.
3947 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003948 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003949 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3950 adjacency_index = fib_entry->nh_group->adj_index;
3951 ecmp_size = fib_entry->nh_group->ecmp_size;
3952 } else {
3953 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3954 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3955 }
3956
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003957 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003958 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3959 adjacency_index, ecmp_size);
3960 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3961}
3962
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003963static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3964 struct mlxsw_sp_fib_entry *fib_entry,
3965 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003966{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003967 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003968 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003969 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003970 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003971 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003972
3973 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3974 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003975 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003976 } else {
3977 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3978 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3979 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003980
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003981 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003982 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3983 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003984 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3985}
3986
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003987static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3988 struct mlxsw_sp_fib_entry *fib_entry,
3989 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003990{
3991 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003992
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003993 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003994 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3995 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3996}
3997
Petr Machata4607f6d2017-09-02 23:49:25 +02003998static int
3999mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4000 struct mlxsw_sp_fib_entry *fib_entry,
4001 enum mlxsw_reg_ralue_op op)
4002{
4003 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4004 const struct mlxsw_sp_ipip_ops *ipip_ops;
4005
4006 if (WARN_ON(!ipip_entry))
4007 return -EINVAL;
4008
4009 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4010 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4011 fib_entry->decap.tunnel_index);
4012}
4013
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004014static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4015 struct mlxsw_sp_fib_entry *fib_entry,
4016 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004017{
4018 switch (fib_entry->type) {
4019 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004020 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004021 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004022 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004023 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004024 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02004025 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4026 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4027 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004028 }
4029 return -EINVAL;
4030}
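
/* Summary of the dispatch above: remote entries are written to RALUE
 * with an adjacency index and ECMP size (or a trap action when the group
 * has no valid index), local entries with a RIF (or a trap), trap
 * entries with the ip2me action, and IP-in-IP decap entries are
 * delegated to the tunnel's fib_entry_op() callback.
 */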
4031
4032static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4033 struct mlxsw_sp_fib_entry *fib_entry,
4034 enum mlxsw_reg_ralue_op op)
4035{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004036 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004037
Ido Schimmel013b20f2017-02-08 11:16:36 +01004038 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004039
Ido Schimmel013b20f2017-02-08 11:16:36 +01004040 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004041}
4042
4043static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4044 struct mlxsw_sp_fib_entry *fib_entry)
4045{
Jiri Pirko7146da32016-09-01 10:37:41 +02004046 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4047 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004048}
4049
4050static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4051 struct mlxsw_sp_fib_entry *fib_entry)
4052{
4053 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4054 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4055}
4056
Jiri Pirko61c503f2016-07-04 08:23:11 +02004057static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004058mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4059 const struct fib_entry_notifier_info *fen_info,
4060 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004061{
Petr Machata4607f6d2017-09-02 23:49:25 +02004062 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4063 struct net_device *dev = fen_info->fi->fib_dev;
4064 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004065 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004066
Ido Schimmel97989ee2017-03-10 08:53:38 +01004067 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004068 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004069 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4070 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004071 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004072 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4073 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4074 fib_entry,
4075 ipip_entry);
4076 }
4077 /* fall through */
4078 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004079 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4080 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004081 case RTN_UNREACHABLE: /* fall through */
4082 case RTN_BLACKHOLE: /* fall through */
4083 case RTN_PROHIBIT:
4084 /* Packets hitting these routes need to be trapped, but
4085 * can do so with a lower priority than packets directed
4086 * at the host, so use action type local instead of trap.
4087 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004088 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004089 return 0;
4090 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004091 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004092 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004093 else
4094 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004095 return 0;
4096 default:
4097 return -EINVAL;
4098 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004099}
4100
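/* Added note: allocate an IPv4 entry, resolve its type and nexthop group,
 * and record the route attributes (priority, table ID, type, TOS) that are
 * later used to order and look up entries within a FIB node.
 */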
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004101static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004102mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4103 struct mlxsw_sp_fib_node *fib_node,
4104 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004105{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004106 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004107 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004108 int err;
4109
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004110 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4111 if (!fib4_entry)
4112 return ERR_PTR(-ENOMEM);
4113 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004114
4115 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4116 if (err)
4117 goto err_fib4_entry_type_set;
4118
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004119 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004120 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004121 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004122
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004123 fib4_entry->prio = fen_info->fi->fib_priority;
4124 fib4_entry->tb_id = fen_info->tb_id;
4125 fib4_entry->type = fen_info->type;
4126 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004127
4128 fib_entry->fib_node = fib_node;
4129
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004130 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004131
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004132err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004133err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004134 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004135 return ERR_PTR(err);
4136}
4137
4138static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004139 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004140{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004141 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004142 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004143}
4144
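/* Added note: look up an existing IPv4 entry by finding the FIB node that
 * matches the notified prefix and comparing table ID, TOS, route type and
 * FIB info of each entry in it.
 */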
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004145static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004146mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4147 const struct fib_entry_notifier_info *fen_info)
4148{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004149 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004150 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004151 struct mlxsw_sp_fib *fib;
4152 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004153
Ido Schimmel160e22a2017-07-18 10:10:20 +02004154 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4155 if (!vr)
4156 return NULL;
4157 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4158
4159 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4160 sizeof(fen_info->dst),
4161 fen_info->dst_len);
4162 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004163 return NULL;
4164
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004165 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4166 if (fib4_entry->tb_id == fen_info->tb_id &&
4167 fib4_entry->tos == fen_info->tos &&
4168 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004169 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4170 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004171 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004172 }
4173 }
4174
4175 return NULL;
4176}
4177
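/* Added note: FIB nodes are hashed by {address, prefix length} within the
 * FIB of a virtual router.
 */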
4178static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4179 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4180 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4181 .key_len = sizeof(struct mlxsw_sp_fib_key),
4182 .automatic_shrinking = true,
4183};
4184
4185static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4186 struct mlxsw_sp_fib_node *fib_node)
4187{
4188 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4189 mlxsw_sp_fib_ht_params);
4190}
4191
4192static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4193 struct mlxsw_sp_fib_node *fib_node)
4194{
4195 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4196 mlxsw_sp_fib_ht_params);
4197}
4198
4199static struct mlxsw_sp_fib_node *
4200mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4201 size_t addr_len, unsigned char prefix_len)
4202{
4203 struct mlxsw_sp_fib_key key;
4204
4205 memset(&key, 0, sizeof(key));
4206 memcpy(key.addr, addr, addr_len);
4207 key.prefix_len = prefix_len;
4208 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4209}
4210
4211static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004212mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004213 size_t addr_len, unsigned char prefix_len)
4214{
4215 struct mlxsw_sp_fib_node *fib_node;
4216
4217 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4218 if (!fib_node)
4219 return NULL;
4220
4221 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004222 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004223 memcpy(fib_node->key.addr, addr, addr_len);
4224 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004225
4226 return fib_node;
4227}
4228
4229static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4230{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004231 list_del(&fib_node->list);
4232 WARN_ON(!list_empty(&fib_node->entry_list));
4233 kfree(fib_node);
4234}
4235
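/* Added note: only the first entry in a FIB node's list is programmed to the
 * device; the remaining entries are kept as backups and promoted when the
 * first one is removed.
 */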
4236static bool
4237mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4238 const struct mlxsw_sp_fib_entry *fib_entry)
4239{
4240 return list_first_entry(&fib_node->entry_list,
4241 struct mlxsw_sp_fib_entry, list) == fib_entry;
4242}
4243
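/* Added note: bind the FIB node's prefix length to the LPM tree used for its
 * protocol. If the current tree already covers this prefix length, only bump
 * its reference count; otherwise get a tree for the extended prefix usage
 * and replace the one bound to the virtual routers.
 */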
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004244static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004245 struct mlxsw_sp_fib_node *fib_node)
4246{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004247 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004248 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004249 struct mlxsw_sp_lpm_tree *lpm_tree;
4250 int err;
4251
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004252 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4253 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4254 goto out;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004255
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004256 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4257 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004258 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4259 fib->proto);
4260 if (IS_ERR(lpm_tree))
4261 return PTR_ERR(lpm_tree);
4262
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004263 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4264 if (err)
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004265 goto err_lpm_tree_replace;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004266
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004267out:
4268 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004269 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004270
4271err_lpm_tree_replace:
4272 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4273 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004274}
4275
4276static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004277 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004278{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004279 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4280 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004281 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004282 int err;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004283
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004284 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004285 return;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004286 /* Try to construct a new LPM tree from the current prefix usage
4287 * minus the unused one. If we fail, continue using the old one.
Ido Schimmel4fd00312018-01-22 09:17:40 +01004288 */
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004289 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4290 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4291 fib_node->key.prefix_len);
4292 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4293 fib->proto);
4294 if (IS_ERR(lpm_tree))
4295 return;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004296
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004297 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4298 if (err)
4299 goto err_lpm_tree_replace;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004300
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004301 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004302
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004303err_lpm_tree_replace:
4304 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004305}
4306
Ido Schimmel76610eb2017-03-10 08:53:41 +01004307static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4308 struct mlxsw_sp_fib_node *fib_node,
4309 struct mlxsw_sp_fib *fib)
4310{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004311 int err;
4312
4313 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4314 if (err)
4315 return err;
4316 fib_node->fib = fib;
4317
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004318 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004319 if (err)
4320 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004321
Ido Schimmel76610eb2017-03-10 08:53:41 +01004322 return 0;
4323
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004324err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004325 fib_node->fib = NULL;
4326 mlxsw_sp_fib_node_remove(fib, fib_node);
4327 return err;
4328}
4329
4330static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4331 struct mlxsw_sp_fib_node *fib_node)
4332{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004333 struct mlxsw_sp_fib *fib = fib_node->fib;
4334
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004335 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004336 fib_node->fib = NULL;
4337 mlxsw_sp_fib_node_remove(fib, fib_node);
4338}
4339
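/* Added note: get-or-create semantics. Look up the FIB node for the given
 * prefix in the virtual router bound to tb_id, creating the virtual router
 * and the node (and linking it to an LPM tree) if they do not exist yet.
 */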
Ido Schimmel9aecce12017-02-09 10:28:42 +01004340static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004341mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4342 size_t addr_len, unsigned char prefix_len,
4343 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004344{
4345 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004346 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004347 struct mlxsw_sp_vr *vr;
4348 int err;
4349
David Ahernf8fa9b42017-10-18 09:56:56 -07004350 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004351 if (IS_ERR(vr))
4352 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004353 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004354
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004355 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004356 if (fib_node)
4357 return fib_node;
4358
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004359 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004360 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004361 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004362 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004363 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004364
Ido Schimmel76610eb2017-03-10 08:53:41 +01004365 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4366 if (err)
4367 goto err_fib_node_init;
4368
Ido Schimmel9aecce12017-02-09 10:28:42 +01004369 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004370
Ido Schimmel76610eb2017-03-10 08:53:41 +01004371err_fib_node_init:
4372 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004373err_fib_node_create:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004374 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004375 return ERR_PTR(err);
4376}
4377
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004378static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4379 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004380{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004381 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004382
Ido Schimmel9aecce12017-02-09 10:28:42 +01004383 if (!list_empty(&fib_node->entry_list))
4384 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004385 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004386 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004387 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004388}
4389
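/* Added note: entries in a node are kept sorted by table ID (descending),
 * TOS (descending) and priority (ascending). Return the existing entry the
 * new one should be inserted before, or NULL if no such entry exists.
 */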
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004390static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004391mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004392 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004393{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004394 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004395
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004396 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4397 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004398 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004399 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004400 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004401 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004402 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004403 if (fib4_entry->prio >= new4_entry->prio ||
4404 fib4_entry->tos < new4_entry->tos)
4405 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004406 }
4407
4408 return NULL;
4409}
4410
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004411static int
4412mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4413 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004414{
4415 struct mlxsw_sp_fib_node *fib_node;
4416
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004417 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004418 return -EINVAL;
4419
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004420 fib_node = fib4_entry->common.fib_node;
4421 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4422 common.list) {
4423 if (fib4_entry->tb_id != new4_entry->tb_id ||
4424 fib4_entry->tos != new4_entry->tos ||
4425 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004426 break;
4427 }
4428
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004429 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004430 return 0;
4431}
4432
Ido Schimmel9aecce12017-02-09 10:28:42 +01004433static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004434mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004435 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004436{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004437 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004438 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004439
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004440 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004441
Ido Schimmel4283bce2017-02-09 10:28:43 +01004442 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004443 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4444 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004445 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004446
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004447 /* Insert the new entry before the replaced one, so that we can
4448 * later remove the replaced entry.
4449 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004450 if (fib4_entry) {
4451 list_add_tail(&new4_entry->common.list,
4452 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004453 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004454 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004455
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004456 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4457 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004458 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004459 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004460 }
4461
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004462 if (fib4_entry)
4463 list_add(&new4_entry->common.list,
4464 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004465 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004466 list_add(&new4_entry->common.list,
4467 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004468 }
4469
4470 return 0;
4471}
4472
4473static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004474mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004475{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004476 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004477}
4478
Ido Schimmel80c238f2017-07-18 10:10:29 +02004479static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4480 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004481{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004482 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4483
Ido Schimmel9aecce12017-02-09 10:28:42 +01004484 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4485 return 0;
4486
4487 /* To prevent packet loss, overwrite the previously offloaded
4488 * entry.
4489 */
4490 if (!list_is_singular(&fib_node->entry_list)) {
4491 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4492 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4493
4494 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4495 }
4496
4497 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4498}
4499
Ido Schimmel80c238f2017-07-18 10:10:29 +02004500static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4501 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004502{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004503 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4504
Ido Schimmel9aecce12017-02-09 10:28:42 +01004505 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4506 return;
4507
4508 /* Promote the next entry by overwriting the deleted entry */
4509 if (!list_is_singular(&fib_node->entry_list)) {
4510 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4511 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4512
4513 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4514 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4515 return;
4516 }
4517
4518 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4519}
4520
4521static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004522 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004523 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004524{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004525 int err;
4526
Ido Schimmel9efbee62017-07-18 10:10:28 +02004527 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004528 if (err)
4529 return err;
4530
Ido Schimmel80c238f2017-07-18 10:10:29 +02004531 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004532 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004533 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004534
Ido Schimmel9aecce12017-02-09 10:28:42 +01004535 return 0;
4536
Ido Schimmel80c238f2017-07-18 10:10:29 +02004537err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004538 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004539 return err;
4540}
4541
4542static void
4543mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004544 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004545{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004546 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004547 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004548
4549 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4550 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004551}
4552
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004553static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004554 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004555 bool replace)
4556{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004557 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4558 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004559
4560 if (!replace)
4561 return;
4562
4563 /* We inserted the new entry before replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004564 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004565
4566 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4567 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004568 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004569}
4570
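/* Added note: handle an IPv4 route add/replace/append notification. Get the
 * FIB node for the prefix, create the entry, link it into the node
 * (programming the device if it became the first entry) and, on replace,
 * unlink and destroy the entry it superseded.
 */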
Ido Schimmel9aecce12017-02-09 10:28:42 +01004571static int
4572mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004573 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004574 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004575{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004576 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004577 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004578 int err;
4579
Ido Schimmel9011b672017-05-16 19:38:25 +02004580 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004581 return 0;
4582
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004583 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4584 &fen_info->dst, sizeof(fen_info->dst),
4585 fen_info->dst_len,
4586 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004587 if (IS_ERR(fib_node)) {
4588 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4589 return PTR_ERR(fib_node);
4590 }
4591
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004592 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4593 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004594 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004595 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004596 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004597 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004598
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004599 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004600 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004601 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004602 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4603 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004604 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004605
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004606 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004607
Jiri Pirko61c503f2016-07-04 08:23:11 +02004608 return 0;
4609
Ido Schimmel9aecce12017-02-09 10:28:42 +01004610err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004611 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004612err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004613 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004614 return err;
4615}
4616
Jiri Pirko37956d72016-10-20 16:05:43 +02004617static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4618 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004619{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004620 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004621 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004622
Ido Schimmel9011b672017-05-16 19:38:25 +02004623 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004624 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004625
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004626 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4627 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004628 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004629 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004630
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004631 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4632 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004633 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004634}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004635
Ido Schimmel428b8512017-08-03 13:28:28 +02004636static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4637{
4638 /* Packets with link-local destination IP arriving to the router
4639 * are trapped to the CPU, so no need to program specific routes
4640 * for them.
4641 */
4642 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4643 return true;
4644
4645 /* Multicast routes aren't supported, so ignore them. Neighbour
4646 * Discovery packets are specifically trapped.
4647 */
4648 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4649 return true;
4650
4651 /* Cloned routes are irrelevant in the forwarding path. */
4652 if (rt->rt6i_flags & RTF_CACHE)
4653 return true;
4654
4655 return false;
4656}
4657
4658static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4659{
4660 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4661
4662 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4663 if (!mlxsw_sp_rt6)
4664 return ERR_PTR(-ENOMEM);
4665
4666 /* In case of route replace, replaced route is deleted with
4667 * no notification. Take reference to prevent accessing freed
4668 * memory.
4669 */
4670 mlxsw_sp_rt6->rt = rt;
4671 rt6_hold(rt);
4672
4673 return mlxsw_sp_rt6;
4674}
4675
4676#if IS_ENABLED(CONFIG_IPV6)
4677static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4678{
4679 rt6_release(rt);
4680}
4681#else
4682static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4683{
4684}
4685#endif
4686
4687static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4688{
4689 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4690 kfree(mlxsw_sp_rt6);
4691}
4692
4693static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4694{
4695 /* RTF_CACHE routes are ignored */
4696 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4697}
4698
4699static struct rt6_info *
4700mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4701{
4702 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4703 list)->rt;
4704}
4705
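/* Added note: find a multipath-capable entry in the same table and with the
 * same metric that the notified route can be appended to as another nexthop.
 * Returns NULL when the route cannot be part of a multipath entry or when
 * this is a replace.
 */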
4706static struct mlxsw_sp_fib6_entry *
4707mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004708 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004709{
4710 struct mlxsw_sp_fib6_entry *fib6_entry;
4711
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004712 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004713 return NULL;
4714
4715 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4716 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4717
4718 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4719 * virtual router.
4720 */
4721 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4722 continue;
4723 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4724 break;
4725 if (rt->rt6i_metric < nrt->rt6i_metric)
4726 continue;
4727 if (rt->rt6i_metric == nrt->rt6i_metric &&
4728 mlxsw_sp_fib6_rt_can_mp(rt))
4729 return fib6_entry;
4730 if (rt->rt6i_metric > nrt->rt6i_metric)
4731 break;
4732 }
4733
4734 return NULL;
4735}
4736
4737static struct mlxsw_sp_rt6 *
4738mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4739 const struct rt6_info *rt)
4740{
4741 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4742
4743 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4744 if (mlxsw_sp_rt6->rt == rt)
4745 return mlxsw_sp_rt6;
4746 }
4747
4748 return NULL;
4749}
4750
Petr Machata8f28a302017-09-02 23:49:24 +02004751static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4752 const struct rt6_info *rt,
4753 enum mlxsw_sp_ipip_type *ret)
4754{
4755 return rt->dst.dev &&
4756 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4757}
4758
Petr Machata35225e42017-09-02 23:49:22 +02004759static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4760 struct mlxsw_sp_nexthop_group *nh_grp,
4761 struct mlxsw_sp_nexthop *nh,
4762 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004763{
Petr Machatad97cda52017-11-28 13:17:13 +01004764 const struct mlxsw_sp_ipip_ops *ipip_ops;
4765 struct mlxsw_sp_ipip_entry *ipip_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004766 struct net_device *dev = rt->dst.dev;
4767 struct mlxsw_sp_rif *rif;
4768 int err;
4769
Petr Machatad97cda52017-11-28 13:17:13 +01004770 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4771 if (ipip_entry) {
4772 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4773 if (ipip_ops->can_offload(mlxsw_sp, dev,
4774 MLXSW_SP_L3_PROTO_IPV6)) {
4775 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4776 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4777 return 0;
4778 }
Petr Machata8f28a302017-09-02 23:49:24 +02004779 }
4780
Petr Machata35225e42017-09-02 23:49:22 +02004781 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004782 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4783 if (!rif)
4784 return 0;
4785 mlxsw_sp_nexthop_rif_init(nh, rif);
4786
4787 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4788 if (err)
4789 goto err_nexthop_neigh_init;
4790
4791 return 0;
4792
4793err_nexthop_neigh_init:
4794 mlxsw_sp_nexthop_rif_fini(nh);
4795 return err;
4796}
4797
Petr Machata35225e42017-09-02 23:49:22 +02004798static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4799 struct mlxsw_sp_nexthop *nh)
4800{
4801 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4802}
4803
4804static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4805 struct mlxsw_sp_nexthop_group *nh_grp,
4806 struct mlxsw_sp_nexthop *nh,
4807 const struct rt6_info *rt)
4808{
4809 struct net_device *dev = rt->dst.dev;
4810
4811 nh->nh_grp = nh_grp;
Ido Schimmel3743d882018-01-12 17:15:59 +01004812 nh->nh_weight = rt->rt6i_nh_weight;
Petr Machata35225e42017-09-02 23:49:22 +02004813 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004814 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004815
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004816 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4817
Petr Machata35225e42017-09-02 23:49:22 +02004818 if (!dev)
4819 return 0;
4820 nh->ifindex = dev->ifindex;
4821
4822 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4823}
4824
Ido Schimmel428b8512017-08-03 13:28:28 +02004825static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4826 struct mlxsw_sp_nexthop *nh)
4827{
Petr Machata35225e42017-09-02 23:49:22 +02004828 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004829 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004830 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004831}
4832
Petr Machataf6050ee2017-09-02 23:49:21 +02004833static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4834 const struct rt6_info *rt)
4835{
Petr Machata8f28a302017-09-02 23:49:24 +02004836 return rt->rt6i_flags & RTF_GATEWAY ||
4837 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004838}
4839
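/* Added note: create a nexthop group with one nexthop per rt6_info in the
 * entry and register it for lookup, so that identical IPv6 multipath routes
 * can share a single group (see mlxsw_sp_nexthop6_group_get()).
 */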
Ido Schimmel428b8512017-08-03 13:28:28 +02004840static struct mlxsw_sp_nexthop_group *
4841mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4842 struct mlxsw_sp_fib6_entry *fib6_entry)
4843{
4844 struct mlxsw_sp_nexthop_group *nh_grp;
4845 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4846 struct mlxsw_sp_nexthop *nh;
4847 size_t alloc_size;
4848 int i = 0;
4849 int err;
4850
4851 alloc_size = sizeof(*nh_grp) +
4852 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4853 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4854 if (!nh_grp)
4855 return ERR_PTR(-ENOMEM);
4856 INIT_LIST_HEAD(&nh_grp->fib_list);
4857#if IS_ENABLED(CONFIG_IPV6)
4858 nh_grp->neigh_tbl = &nd_tbl;
4859#endif
4860 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4861 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004862 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004863 nh_grp->count = fib6_entry->nrt6;
4864 for (i = 0; i < nh_grp->count; i++) {
4865 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4866
4867 nh = &nh_grp->nexthops[i];
4868 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4869 if (err)
4870 goto err_nexthop6_init;
4871 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4872 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004873
4874 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4875 if (err)
4876 goto err_nexthop_group_insert;
4877
Ido Schimmel428b8512017-08-03 13:28:28 +02004878 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4879 return nh_grp;
4880
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004881err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004882err_nexthop6_init:
4883 for (i--; i >= 0; i--) {
4884 nh = &nh_grp->nexthops[i];
4885 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4886 }
4887 kfree(nh_grp);
4888 return ERR_PTR(err);
4889}
4890
4891static void
4892mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4893 struct mlxsw_sp_nexthop_group *nh_grp)
4894{
4895 struct mlxsw_sp_nexthop *nh;
4896 int i = nh_grp->count;
4897
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004898 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004899 for (i--; i >= 0; i--) {
4900 nh = &nh_grp->nexthops[i];
4901 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4902 }
4903 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4904 WARN_ON(nh_grp->adj_index_valid);
4905 kfree(nh_grp);
4906}
4907
4908static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4909 struct mlxsw_sp_fib6_entry *fib6_entry)
4910{
4911 struct mlxsw_sp_nexthop_group *nh_grp;
4912
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004913 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4914 if (!nh_grp) {
4915 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4916 if (IS_ERR(nh_grp))
4917 return PTR_ERR(nh_grp);
4918 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004919
4920 list_add_tail(&fib6_entry->common.nexthop_group_node,
4921 &nh_grp->fib_list);
4922 fib6_entry->common.nh_group = nh_grp;
4923
4924 return 0;
4925}
4926
4927static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4928 struct mlxsw_sp_fib_entry *fib_entry)
4929{
4930 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4931
4932 list_del(&fib_entry->nexthop_group_node);
4933 if (!list_empty(&nh_grp->fib_list))
4934 return;
4935 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4936}
4937
4938static int
4939mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4940 struct mlxsw_sp_fib6_entry *fib6_entry)
4941{
4942 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4943 int err;
4944
4945 fib6_entry->common.nh_group = NULL;
4946 list_del(&fib6_entry->common.nexthop_group_node);
4947
4948 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4949 if (err)
4950 goto err_nexthop6_group_get;
4951
4952 /* In case this entry is offloaded, then the adjacency index
4953 * currently associated with it in the device's table is that
4954 * of the old group. Start using the new one instead.
4955 */
4956 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4957 if (err)
4958 goto err_fib_node_entry_add;
4959
4960 if (list_empty(&old_nh_grp->fib_list))
4961 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4962
4963 return 0;
4964
4965err_fib_node_entry_add:
4966 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4967err_nexthop6_group_get:
4968 list_add_tail(&fib6_entry->common.nexthop_group_node,
4969 &old_nh_grp->fib_list);
4970 fib6_entry->common.nh_group = old_nh_grp;
4971 return err;
4972}
4973
4974static int
4975mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4976 struct mlxsw_sp_fib6_entry *fib6_entry,
4977 struct rt6_info *rt)
4978{
4979 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4980 int err;
4981
4982 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4983 if (IS_ERR(mlxsw_sp_rt6))
4984 return PTR_ERR(mlxsw_sp_rt6);
4985
4986 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4987 fib6_entry->nrt6++;
4988
4989 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4990 if (err)
4991 goto err_nexthop6_group_update;
4992
4993 return 0;
4994
4995err_nexthop6_group_update:
4996 fib6_entry->nrt6--;
4997 list_del(&mlxsw_sp_rt6->list);
4998 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4999 return err;
5000}
5001
5002static void
5003mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5004 struct mlxsw_sp_fib6_entry *fib6_entry,
5005 struct rt6_info *rt)
5006{
5007 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5008
5009 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5010 if (WARN_ON(!mlxsw_sp_rt6))
5011 return;
5012
5013 fib6_entry->nrt6--;
5014 list_del(&mlxsw_sp_rt6->list);
5015 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5016 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5017}
5018
Petr Machataf6050ee2017-09-02 23:49:21 +02005019static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5020 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02005021 const struct rt6_info *rt)
5022{
5023 /* Packets hitting RTF_REJECT routes need to be discarded by the
5024 * stack. We can rely on their destination device not having a
5025 * RIF (it's the loopback device) and can thus use action type
5026 * local, which will cause them to be trapped with a lower
5027 * priority than packets that need to be locally received.
5028 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02005029 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02005030 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5031 else if (rt->rt6i_flags & RTF_REJECT)
5032 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02005033 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02005034 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5035 else
5036 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5037}
5038
5039static void
5040mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5041{
5042 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5043
5044 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5045 list) {
5046 fib6_entry->nrt6--;
5047 list_del(&mlxsw_sp_rt6->list);
5048 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5049 }
5050}
5051
5052static struct mlxsw_sp_fib6_entry *
5053mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5054 struct mlxsw_sp_fib_node *fib_node,
5055 struct rt6_info *rt)
5056{
5057 struct mlxsw_sp_fib6_entry *fib6_entry;
5058 struct mlxsw_sp_fib_entry *fib_entry;
5059 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5060 int err;
5061
5062 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5063 if (!fib6_entry)
5064 return ERR_PTR(-ENOMEM);
5065 fib_entry = &fib6_entry->common;
5066
5067 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5068 if (IS_ERR(mlxsw_sp_rt6)) {
5069 err = PTR_ERR(mlxsw_sp_rt6);
5070 goto err_rt6_create;
5071 }
5072
Petr Machataf6050ee2017-09-02 23:49:21 +02005073 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005074
5075 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5076 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5077 fib6_entry->nrt6 = 1;
5078 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5079 if (err)
5080 goto err_nexthop6_group_get;
5081
5082 fib_entry->fib_node = fib_node;
5083
5084 return fib6_entry;
5085
5086err_nexthop6_group_get:
5087 list_del(&mlxsw_sp_rt6->list);
5088 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5089err_rt6_create:
5090 kfree(fib6_entry);
5091 return ERR_PTR(err);
5092}
5093
5094static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5095 struct mlxsw_sp_fib6_entry *fib6_entry)
5096{
5097 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5098 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5099 WARN_ON(fib6_entry->nrt6);
5100 kfree(fib6_entry);
5101}
5102
5103static struct mlxsw_sp_fib6_entry *
5104mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005105 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005106{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005107 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005108
5109 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5110 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5111
5112 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
5113 continue;
5114 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
5115 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005116 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
5117 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5118 mlxsw_sp_fib6_rt_can_mp(nrt))
5119 return fib6_entry;
5120 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5121 fallback = fallback ?: fib6_entry;
5122 }
Ido Schimmel428b8512017-08-03 13:28:28 +02005123 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005124 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005125 }
5126
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005127 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02005128}
5129
5130static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005131mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5132 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005133{
5134 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5135 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5136 struct mlxsw_sp_fib6_entry *fib6_entry;
5137
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005138 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5139
5140 if (replace && WARN_ON(!fib6_entry))
5141 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005142
5143 if (fib6_entry) {
5144 list_add_tail(&new6_entry->common.list,
5145 &fib6_entry->common.list);
5146 } else {
5147 struct mlxsw_sp_fib6_entry *last;
5148
5149 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5150 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5151
5152 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5153 break;
5154 fib6_entry = last;
5155 }
5156
5157 if (fib6_entry)
5158 list_add(&new6_entry->common.list,
5159 &fib6_entry->common.list);
5160 else
5161 list_add(&new6_entry->common.list,
5162 &fib_node->entry_list);
5163 }
5164
5165 return 0;
5166}
5167
5168static void
5169mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5170{
5171 list_del(&fib6_entry->common.list);
5172}
5173
5174static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005175 struct mlxsw_sp_fib6_entry *fib6_entry,
5176 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005177{
5178 int err;
5179
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005180 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005181 if (err)
5182 return err;
5183
5184 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5185 if (err)
5186 goto err_fib_node_entry_add;
5187
5188 return 0;
5189
5190err_fib_node_entry_add:
5191 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5192 return err;
5193}
5194
5195static void
5196mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5197 struct mlxsw_sp_fib6_entry *fib6_entry)
5198{
5199 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5200 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5201}
5202
5203static struct mlxsw_sp_fib6_entry *
5204mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5205 const struct rt6_info *rt)
5206{
5207 struct mlxsw_sp_fib6_entry *fib6_entry;
5208 struct mlxsw_sp_fib_node *fib_node;
5209 struct mlxsw_sp_fib *fib;
5210 struct mlxsw_sp_vr *vr;
5211
5212 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5213 if (!vr)
5214 return NULL;
5215 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5216
5217 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5218 sizeof(rt->rt6i_dst.addr),
5219 rt->rt6i_dst.plen);
5220 if (!fib_node)
5221 return NULL;
5222
5223 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5224 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5225
5226 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5227 rt->rt6i_metric == iter_rt->rt6i_metric &&
5228 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5229 return fib6_entry;
5230 }
5231
5232 return NULL;
5233}
5234
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005235static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5236 struct mlxsw_sp_fib6_entry *fib6_entry,
5237 bool replace)
5238{
5239 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5240 struct mlxsw_sp_fib6_entry *replaced;
5241
5242 if (!replace)
5243 return;
5244
5245 replaced = list_next_entry(fib6_entry, common.list);
5246
5247 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5248 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5249 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5250}
5251
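/* Added note: handle an IPv6 route add/replace notification. Source-specific
 * routes are rejected and link-local, multicast and cloned routes are
 * ignored. The route is either appended as a nexthop to an existing
 * multipath entry or inserted as a new entry in its FIB node.
 */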
Ido Schimmel428b8512017-08-03 13:28:28 +02005252static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005253 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005254{
5255 struct mlxsw_sp_fib6_entry *fib6_entry;
5256 struct mlxsw_sp_fib_node *fib_node;
5257 int err;
5258
5259 if (mlxsw_sp->router->aborted)
5260 return 0;
5261
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005262 if (rt->rt6i_src.plen)
5263 return -EINVAL;
5264
Ido Schimmel428b8512017-08-03 13:28:28 +02005265 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5266 return 0;
5267
5268 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5269 &rt->rt6i_dst.addr,
5270 sizeof(rt->rt6i_dst.addr),
5271 rt->rt6i_dst.plen,
5272 MLXSW_SP_L3_PROTO_IPV6);
5273 if (IS_ERR(fib_node))
5274 return PTR_ERR(fib_node);
5275
5276 /* Before creating a new entry, try to append route to an existing
5277 * multipath entry.
5278 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005279 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005280 if (fib6_entry) {
5281 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5282 if (err)
5283 goto err_fib6_entry_nexthop_add;
5284 return 0;
5285 }
5286
5287 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5288 if (IS_ERR(fib6_entry)) {
5289 err = PTR_ERR(fib6_entry);
5290 goto err_fib6_entry_create;
5291 }
5292
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005293 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005294 if (err)
5295 goto err_fib6_node_entry_link;
5296
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005297 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5298
Ido Schimmel428b8512017-08-03 13:28:28 +02005299 return 0;
5300
5301err_fib6_node_entry_link:
5302 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5303err_fib6_entry_create:
5304err_fib6_entry_nexthop_add:
5305 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5306 return err;
5307}
5308
5309static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5310 struct rt6_info *rt)
5311{
5312 struct mlxsw_sp_fib6_entry *fib6_entry;
5313 struct mlxsw_sp_fib_node *fib_node;
5314
5315 if (mlxsw_sp->router->aborted)
5316 return;
5317
5318 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5319 return;
5320
5321 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5322 if (WARN_ON(!fib6_entry))
5323 return;
5324
5325 /* If route is part of a multipath entry, but not the last one
5326 * removed, then only reduce its nexthop group.
5327 */
5328 if (!list_is_singular(&fib6_entry->rt6_list)) {
5329 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5330 return;
5331 }
5332
5333 fib_node = fib6_entry->common.fib_node;
5334
5335 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5336 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5337 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5338}
5339
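/* Added note: in abort mode routing is no longer offloaded. Bind each
 * virtual router to a minimal LPM tree and install a default route that
 * traps all packets to the CPU, so traffic is handled by the kernel instead.
 */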
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005340static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5341 enum mlxsw_reg_ralxx_protocol proto,
5342 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005343{
5344 char ralta_pl[MLXSW_REG_RALTA_LEN];
5345 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005346 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005347
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005348 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005349 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5350 if (err)
5351 return err;
5352
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005353 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005354 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5355 if (err)
5356 return err;
5357
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005358 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005359 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005360 char raltb_pl[MLXSW_REG_RALTB_LEN];
5361 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005362
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005363 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005364 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5365 raltb_pl);
5366 if (err)
5367 return err;
5368
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005369 mlxsw_reg_ralue_pack(ralue_pl, proto,
5370 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005371 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5372 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5373 ralue_pl);
5374 if (err)
5375 return err;
5376 }
5377
5378 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005379}
5380
Yotam Gigid42b0962017-09-27 08:23:20 +02005381static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5382 struct mfc_entry_notifier_info *men_info,
5383 bool replace)
5384{
5385 struct mlxsw_sp_vr *vr;
5386
5387 if (mlxsw_sp->router->aborted)
5388 return 0;
5389
David Ahernf8fa9b42017-10-18 09:56:56 -07005390 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005391 if (IS_ERR(vr))
5392 return PTR_ERR(vr);
5393
Yuval Mintz54c4cad2018-03-26 15:01:32 +03005394 return mlxsw_sp_mr_route4_add(vr->mr4_table,
5395 (struct mfc_cache *) men_info->mfc,
5396 replace);
Yotam Gigid42b0962017-09-27 08:23:20 +02005397}
5398
5399static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5400 struct mfc_entry_notifier_info *men_info)
5401{
5402 struct mlxsw_sp_vr *vr;
5403
5404 if (mlxsw_sp->router->aborted)
5405 return;
5406
5407 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5408 if (WARN_ON(!vr))
5409 return;
5410
Yuval Mintz54c4cad2018-03-26 15:01:32 +03005411 mlxsw_sp_mr_route4_del(vr->mr4_table,
5412 (struct mfc_cache *) men_info->mfc);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005413 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005414}
5415
5416static int
5417mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5418 struct vif_entry_notifier_info *ven_info)
5419{
5420 struct mlxsw_sp_rif *rif;
5421 struct mlxsw_sp_vr *vr;
5422
5423 if (mlxsw_sp->router->aborted)
5424 return 0;
5425
David Ahernf8fa9b42017-10-18 09:56:56 -07005426 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005427 if (IS_ERR(vr))
5428 return PTR_ERR(vr);
5429
5430 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5431 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5432 ven_info->vif_index,
5433 ven_info->vif_flags, rif);
5434}
5435
5436static void
5437mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5438 struct vif_entry_notifier_info *ven_info)
5439{
5440 struct mlxsw_sp_vr *vr;
5441
5442 if (mlxsw_sp->router->aborted)
5443 return;
5444
5445 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5446 if (WARN_ON(!vr))
5447 return;
5448
5449 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005450 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005451}
5452
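/* After a FIB abort, install a default trap route for both IPv4 and
 * IPv6, each on its own minimal LPM tree, so that traffic which would
 * have been routed in hardware is handed to the kernel instead.
 */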
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005453static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5454{
5455 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5456 int err;
5457
5458 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5459 MLXSW_SP_LPM_TREE_MIN);
5460 if (err)
5461 return err;
5462
Yotam Gigid42b0962017-09-27 08:23:20 +02005463 /* The multicast router code does not need an abort trap as by default,
5464 * packets that don't match any routes are trapped to the CPU.
5465 */
5466
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005467 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5468 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5469 MLXSW_SP_LPM_TREE_MIN + 1);
5470}
5471
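/* Flush all IPv4 entries hanging off a FIB node. The node itself may be
 * freed together with its last entry, hence the careful list walk below.
 */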
Ido Schimmel9aecce12017-02-09 10:28:42 +01005472static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5473 struct mlxsw_sp_fib_node *fib_node)
5474{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005475 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005476
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005477 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5478 common.list) {
5479 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005480
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005481 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5482 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005483 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005484 /* Break when entry list is empty and node was freed.
5485 * Otherwise, we'll access freed memory in the next
5486 * iteration.
5487 */
5488 if (do_break)
5489 break;
5490 }
5491}
5492
Ido Schimmel428b8512017-08-03 13:28:28 +02005493static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5494 struct mlxsw_sp_fib_node *fib_node)
5495{
5496 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5497
5498 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5499 common.list) {
5500 bool do_break = &tmp->common.list == &fib_node->entry_list;
5501
5502 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5503 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5504 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5505 if (do_break)
5506 break;
5507 }
5508}
5509
Ido Schimmel9aecce12017-02-09 10:28:42 +01005510static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5511 struct mlxsw_sp_fib_node *fib_node)
5512{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005513 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005514 case MLXSW_SP_L3_PROTO_IPV4:
5515 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5516 break;
5517 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005518 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005519 break;
5520 }
5521}
5522
Ido Schimmel76610eb2017-03-10 08:53:41 +01005523static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5524 struct mlxsw_sp_vr *vr,
5525 enum mlxsw_sp_l3proto proto)
5526{
5527 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5528 struct mlxsw_sp_fib_node *fib_node, *tmp;
5529
5530 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5531 bool do_break = &tmp->list == &fib->node_list;
5532
5533 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5534 if (do_break)
5535 break;
5536 }
5537}
5538
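/* Flush every virtual router that is currently in use: first the IPv4
 * multicast table and the IPv4 FIB, then, if the VR is still in use
 * afterwards, the IPv6 FIB.
 */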
Ido Schimmelac571de2016-11-14 11:26:32 +01005539static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005540{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005541 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005542
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005543 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005544 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005545
Ido Schimmel76610eb2017-03-10 08:53:41 +01005546 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005547 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005548
5549 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005550 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005551
5552	 /* If the virtual router was only used for IPv4, then after the
5553	  * flush above it is no longer in use and there is no IPv6 FIB to flush.
5554	  */
5555 if (!mlxsw_sp_vr_is_used(vr))
5556 continue;
5557 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005558 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005559}
5560
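/* One-shot transition into the aborted state: flush everything that was
 * offloaded and install the abort traps so that routing falls back to
 * the kernel. Later route notifications are then ignored.
 */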
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005561static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005562{
5563 int err;
5564
Ido Schimmel9011b672017-05-16 19:38:25 +02005565 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005566 return;
5567 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005568 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005569 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005570 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5571 if (err)
5572 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5573}
5574
Ido Schimmel30572242016-12-03 16:45:01 +01005575struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005576 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005577 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005578 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005579 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005580 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005581 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005582 struct mfc_entry_notifier_info men_info;
5583 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005584 };
Ido Schimmel30572242016-12-03 16:45:01 +01005585 struct mlxsw_sp *mlxsw_sp;
5586 unsigned long event;
5587};
5588
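/* Deferred handling of IPv4 FIB events. Runs under RTNL; a failure to
 * program the hardware triggers a FIB abort rather than an error return.
 */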
Ido Schimmel66a57632017-08-03 13:28:26 +02005589static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005590{
Ido Schimmel30572242016-12-03 16:45:01 +01005591 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005592 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005593 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005594 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005595 int err;
5596
Ido Schimmel30572242016-12-03 16:45:01 +01005597 /* Protect internal structures from changes */
5598 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005599 mlxsw_sp_span_respin(mlxsw_sp);
5600
Ido Schimmel30572242016-12-03 16:45:01 +01005601 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005602 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005603 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005604 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005605 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005606 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5607 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005608 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005609 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005610 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005611 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005612 break;
5613 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005614 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5615 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005616 break;
David Ahern1f279232017-10-27 17:37:14 -07005617 case FIB_EVENT_RULE_ADD:
5618		/* If we get here, a rule was added that we do not support.
5619		 * Abort FIB offloading altogether.
5620		 */
5621 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005622 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005623 case FIB_EVENT_NH_ADD: /* fall through */
5624 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005625 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5626 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005627 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5628 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005629 }
Ido Schimmel30572242016-12-03 16:45:01 +01005630 rtnl_unlock();
5631 kfree(fib_work);
5632}
5633
Ido Schimmel66a57632017-08-03 13:28:26 +02005634static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5635{
Ido Schimmel583419f2017-08-03 13:28:27 +02005636 struct mlxsw_sp_fib_event_work *fib_work =
5637 container_of(work, struct mlxsw_sp_fib_event_work, work);
5638 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005639 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005640 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005641
5642 rtnl_lock();
Petr Machata803335a2018-02-27 14:53:46 +01005643 mlxsw_sp_span_respin(mlxsw_sp);
5644
Ido Schimmel583419f2017-08-03 13:28:27 +02005645 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005646 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005647 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005648 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005649 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005650 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005651 if (err)
5652 mlxsw_sp_router_fib_abort(mlxsw_sp);
5653 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5654 break;
5655 case FIB_EVENT_ENTRY_DEL:
5656 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5657 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5658 break;
David Ahern1f279232017-10-27 17:37:14 -07005659 case FIB_EVENT_RULE_ADD:
5660		/* If we get here, a rule was added that we do not support.
5661		 * Abort FIB offloading altogether.
5662		 */
5663 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005664 break;
5665 }
5666 rtnl_unlock();
5667 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005668}
5669
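/* Deferred handling of multicast route (MFC) and multicast interface
 * (VIF) events, mirroring the unicast work functions above.
 */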
Yotam Gigid42b0962017-09-27 08:23:20 +02005670static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5671{
5672 struct mlxsw_sp_fib_event_work *fib_work =
5673 container_of(work, struct mlxsw_sp_fib_event_work, work);
5674 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005675 bool replace;
5676 int err;
5677
5678 rtnl_lock();
5679 switch (fib_work->event) {
5680 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5681 case FIB_EVENT_ENTRY_ADD:
5682 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5683
5684 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5685 replace);
5686 if (err)
5687 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005688 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005689 break;
5690 case FIB_EVENT_ENTRY_DEL:
5691 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
Yuval Mintz8c13af22018-03-26 15:01:36 +03005692 mr_cache_put(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005693 break;
5694 case FIB_EVENT_VIF_ADD:
5695 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5696 &fib_work->ven_info);
5697 if (err)
5698 mlxsw_sp_router_fib_abort(mlxsw_sp);
5699 dev_put(fib_work->ven_info.dev);
5700 break;
5701 case FIB_EVENT_VIF_DEL:
5702 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5703 &fib_work->ven_info);
5704 dev_put(fib_work->ven_info.dev);
5705 break;
David Ahern1f279232017-10-27 17:37:14 -07005706 case FIB_EVENT_RULE_ADD:
5707 /* if we get here, a rule was added that we do not support.
5708		/* If we get here, a rule was added that we do not support.
5709		 * Abort FIB offloading altogether.
5710		 */
Yotam Gigid42b0962017-09-27 08:23:20 +02005711 break;
5712 }
5713 rtnl_unlock();
5714 kfree(fib_work);
5715}
5716
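/* Copy the IPv4 notifier info into the work item and take the references
 * needed to keep it valid until the work item runs.
 */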
Ido Schimmel66a57632017-08-03 13:28:26 +02005717static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5718 struct fib_notifier_info *info)
5719{
David Ahern3c75f9b2017-10-18 15:01:38 -07005720 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005721 struct fib_nh_notifier_info *fnh_info;
5722
Ido Schimmel66a57632017-08-03 13:28:26 +02005723 switch (fib_work->event) {
5724 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5725 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5726 case FIB_EVENT_ENTRY_ADD: /* fall through */
5727 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005728 fen_info = container_of(info, struct fib_entry_notifier_info,
5729 info);
5730 fib_work->fen_info = *fen_info;
5731 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005732 * freed while work is queued. Release it afterwards.
5733 */
5734 fib_info_hold(fib_work->fen_info.fi);
5735 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005736 case FIB_EVENT_NH_ADD: /* fall through */
5737 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005738 fnh_info = container_of(info, struct fib_nh_notifier_info,
5739 info);
5740 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005741 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5742 break;
5743 }
5744}
5745
5746static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5747 struct fib_notifier_info *info)
5748{
David Ahern3c75f9b2017-10-18 15:01:38 -07005749 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005750
Ido Schimmel583419f2017-08-03 13:28:27 +02005751 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005752 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005753 case FIB_EVENT_ENTRY_ADD: /* fall through */
5754 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005755 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5756 info);
5757 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005758 rt6_hold(fib_work->fen6_info.rt);
5759 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005760 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005761}
5762
Yotam Gigid42b0962017-09-27 08:23:20 +02005763static void
5764mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5765 struct fib_notifier_info *info)
5766{
5767 switch (fib_work->event) {
5768 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5769 case FIB_EVENT_ENTRY_ADD: /* fall through */
5770 case FIB_EVENT_ENTRY_DEL:
5771 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
Yuval Mintz8c13af22018-03-26 15:01:36 +03005772 mr_cache_hold(fib_work->men_info.mfc);
Yotam Gigid42b0962017-09-27 08:23:20 +02005773 break;
5774 case FIB_EVENT_VIF_ADD: /* fall through */
5775 case FIB_EVENT_VIF_DEL:
5776 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5777 dev_hold(fib_work->ven_info.dev);
5778 break;
David Ahern1f279232017-10-27 17:37:14 -07005779 }
5780}
5781
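/* FIB rules cannot be offloaded. Anything other than the default rules
 * or an l3mdev rule is flagged here, and the caller then schedules work
 * that aborts FIB offloading altogether.
 */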
5782static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5783 struct fib_notifier_info *info,
5784 struct mlxsw_sp *mlxsw_sp)
5785{
5786 struct netlink_ext_ack *extack = info->extack;
5787 struct fib_rule_notifier_info *fr_info;
5788 struct fib_rule *rule;
5789 int err = 0;
5790
5791	/* Nothing to do at the moment. */
5792 if (event == FIB_EVENT_RULE_DEL)
5793 return 0;
5794
5795 if (mlxsw_sp->router->aborted)
5796 return 0;
5797
5798 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5799 rule = fr_info->rule;
5800
5801 switch (info->family) {
5802 case AF_INET:
5803 if (!fib4_rule_default(rule) && !rule->l3mdev)
5804 err = -1;
5805 break;
5806 case AF_INET6:
5807 if (!fib6_rule_default(rule) && !rule->l3mdev)
5808 err = -1;
5809 break;
5810 case RTNL_FAMILY_IPMR:
5811 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5812 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005813 break;
5814 }
David Ahern1f279232017-10-27 17:37:14 -07005815
5816 if (err < 0)
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01005817 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
David Ahern1f279232017-10-27 17:37:14 -07005818
5819 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005820}
5821
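/* Top-level FIB notifier block callback. It runs in an atomic context,
 * so it only validates the event and defers the real work to a per
 * address family work item.
 */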
Ido Schimmel30572242016-12-03 16:45:01 +01005822/* Called with rcu_read_lock() */
5823static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5824 unsigned long event, void *ptr)
5825{
Ido Schimmel30572242016-12-03 16:45:01 +01005826 struct mlxsw_sp_fib_event_work *fib_work;
5827 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005828 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005829 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005830
Ido Schimmel8e29f972017-09-15 15:31:07 +02005831 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005832 (info->family != AF_INET && info->family != AF_INET6 &&
5833 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005834 return NOTIFY_DONE;
5835
David Ahern1f279232017-10-27 17:37:14 -07005836 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5837
5838 switch (event) {
5839 case FIB_EVENT_RULE_ADD: /* fall through */
5840 case FIB_EVENT_RULE_DEL:
5841 err = mlxsw_sp_router_fib_rule_event(event, info,
5842 router->mlxsw_sp);
5843 if (!err)
5844 return NOTIFY_DONE;
5845 }
5846
Ido Schimmel30572242016-12-03 16:45:01 +01005847 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5848 if (WARN_ON(!fib_work))
5849 return NOTIFY_BAD;
5850
Ido Schimmel7e39d112017-05-16 19:38:28 +02005851 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005852 fib_work->event = event;
5853
Ido Schimmel66a57632017-08-03 13:28:26 +02005854 switch (info->family) {
5855 case AF_INET:
5856 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5857 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005858 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005859 case AF_INET6:
5860 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5861 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005862 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005863 case RTNL_FAMILY_IPMR:
5864 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5865 mlxsw_sp_router_fibmr_event(fib_work, info);
5866 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005867 }
5868
Ido Schimmela0e47612017-02-06 16:20:10 +01005869 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005870
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005871 return NOTIFY_DONE;
5872}
5873
Ido Schimmel4724ba562017-03-10 08:53:39 +01005874static struct mlxsw_sp_rif *
5875mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5876 const struct net_device *dev)
5877{
5878 int i;
5879
5880 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005881 if (mlxsw_sp->router->rifs[i] &&
5882 mlxsw_sp->router->rifs[i]->dev == dev)
5883 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005884
5885 return NULL;
5886}
5887
5888static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5889{
5890 char ritr_pl[MLXSW_REG_RITR_LEN];
5891 int err;
5892
5893 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5894 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5895 if (WARN_ON_ONCE(err))
5896 return err;
5897
5898 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5899 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5900}
5901
5902static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005903 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005904{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005905 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5906 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5907 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005908}
5909
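/* Decide whether an address event should change the RIF configuration:
 * create a RIF on the first address (NETDEV_UP with no RIF yet) and
 * destroy it on NETDEV_DOWN only once no IPv4 or IPv6 addresses remain
 * and the netdev is not enslaved to an L3 master device.
 */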
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005910static bool
5911mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5912 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005913{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005914 struct inet6_dev *inet6_dev;
5915 bool addr_list_empty = true;
5916 struct in_device *idev;
5917
Ido Schimmel4724ba562017-03-10 08:53:39 +01005918 switch (event) {
5919 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005920 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005921 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005922 idev = __in_dev_get_rtnl(dev);
5923 if (idev && idev->ifa_list)
5924 addr_list_empty = false;
5925
5926 inet6_dev = __in6_dev_get(dev);
5927 if (addr_list_empty && inet6_dev &&
5928 !list_empty(&inet6_dev->addr_list))
5929 addr_list_empty = false;
5930
5931 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005932 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005933 return true;
5934 /* It is possible we already removed the RIF ourselves
5935 * if it was assigned to a netdev that is now a bridge
5936 * or LAG slave.
5937 */
5938 return false;
5939 }
5940
5941 return false;
5942}
5943
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005944static enum mlxsw_sp_rif_type
5945mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5946 const struct net_device *dev)
5947{
5948 enum mlxsw_sp_fid_type type;
5949
Petr Machata6ddb7422017-09-02 23:49:19 +02005950 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5951 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5952
5953 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005954 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5955 type = MLXSW_SP_FID_TYPE_8021Q;
5956 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5957 type = MLXSW_SP_FID_TYPE_8021Q;
5958 else if (netif_is_bridge_master(dev))
5959 type = MLXSW_SP_FID_TYPE_8021D;
5960 else
5961 type = MLXSW_SP_FID_TYPE_RFID;
5962
5963 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5964}
5965
Ido Schimmelde5ed992017-06-04 16:53:40 +02005966static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005967{
5968 int i;
5969
Ido Schimmelde5ed992017-06-04 16:53:40 +02005970 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5971 if (!mlxsw_sp->router->rifs[i]) {
5972 *p_rif_index = i;
5973 return 0;
5974 }
5975 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005976
Ido Schimmelde5ed992017-06-04 16:53:40 +02005977 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005978}
5979
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005980static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5981 u16 vr_id,
5982 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005983{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005984 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005985
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005986 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005987 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005988 return NULL;
5989
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005990 INIT_LIST_HEAD(&rif->nexthop_list);
5991 INIT_LIST_HEAD(&rif->neigh_list);
5992 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5993 rif->mtu = l3_dev->mtu;
5994 rif->vr_id = vr_id;
5995 rif->dev = l3_dev;
5996 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005997
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005998 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005999}
6000
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006001struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6002 u16 rif_index)
6003{
6004 return mlxsw_sp->router->rifs[rif_index];
6005}
6006
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006007u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6008{
6009 return rif->rif_index;
6010}
6011
Petr Machata92107cf2017-09-02 23:49:28 +02006012u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6013{
6014 return lb_rif->common.rif_index;
6015}
6016
6017u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6018{
6019 return lb_rif->ul_vr_id;
6020}
6021
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006022int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6023{
6024 return rif->dev->ifindex;
6025}
6026
Yotam Gigi91e4d592017-09-19 10:00:19 +02006027const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6028{
6029 return rif->dev;
6030}
6031
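/* Create a router interface (RIF) for a netdev: derive the RIF type,
 * bind it to a virtual router, allocate a free RIF index, take a FID
 * where the type requires one and program the hardware through the
 * type-specific ops.
 */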
Ido Schimmel4724ba562017-03-10 08:53:39 +01006032static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006033mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006034 const struct mlxsw_sp_rif_params *params,
6035 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006036{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006037 u32 tb_id = l3mdev_fib_table(params->dev);
6038 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02006039 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006040 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006041 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006042 struct mlxsw_sp_vr *vr;
6043 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006044 int err;
6045
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006046 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6047 ops = mlxsw_sp->router->rif_ops_arr[type];
6048
David Ahernf8fa9b42017-10-18 09:56:56 -07006049 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006050 if (IS_ERR(vr))
6051 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006052 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006053
Ido Schimmelde5ed992017-06-04 16:53:40 +02006054 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006055 if (err) {
Arkadi Sharshevsky6c677752018-02-13 11:29:05 +01006056 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006057 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006058 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006059
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006060 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006061 if (!rif) {
6062 err = -ENOMEM;
6063 goto err_rif_alloc;
6064 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006065 rif->mlxsw_sp = mlxsw_sp;
6066 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006067
Petr Machata010cadf2017-09-02 23:49:18 +02006068 if (ops->fid_get) {
6069 fid = ops->fid_get(rif);
6070 if (IS_ERR(fid)) {
6071 err = PTR_ERR(fid);
6072 goto err_fid_get;
6073 }
6074 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006075 }
6076
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006077 if (ops->setup)
6078 ops->setup(rif, params);
6079
6080 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006081 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006082 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006083
Yotam Gigid42b0962017-09-27 08:23:20 +02006084 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
6085 if (err)
6086 goto err_mr_rif_add;
6087
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006088 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006089 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006090
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006091 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006092
Yotam Gigid42b0962017-09-27 08:23:20 +02006093err_mr_rif_add:
6094 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006095err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006096 if (fid)
6097 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006098err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006099 kfree(rif);
6100err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006101err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006102 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006103 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006104 return ERR_PTR(err);
6105}
6106
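/* Tear a RIF down in the reverse order of its creation and release the
 * virtual router reference that was taken on its behalf.
 */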
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006107void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006108{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006109 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6110 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006111 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006112 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006113
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006114 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006115 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006116
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006117 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006118 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006119 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006120 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006121 if (fid)
6122 /* Loopback RIFs are not associated with a FID. */
6123 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006124 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006125 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006126 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006127}
6128
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006129static void
6130mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6131 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6132{
6133 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6134
6135 params->vid = mlxsw_sp_port_vlan->vid;
6136 params->lag = mlxsw_sp_port->lagged;
6137 if (params->lag)
6138 params->lag_id = mlxsw_sp_port->lag_id;
6139 else
6140 params->system_port = mlxsw_sp_port->local_port;
6141}
6142
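/* Join a {port, VID} to the router: create a sub-port RIF for the L3
 * netdev if one does not exist yet, map the {port, VID} to the RIF's
 * rFID, and put the VID into a non-learning, forwarding state.
 */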
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006143static int
Ido Schimmela1107482017-05-26 08:37:39 +02006144mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006145 struct net_device *l3_dev,
6146 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006147{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006148 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006149 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006150 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006151 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006152 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006153 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006154
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006155 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006156 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006157 struct mlxsw_sp_rif_params params = {
6158 .dev = l3_dev,
6159 };
6160
6161 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006162 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006163 if (IS_ERR(rif))
6164 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006165 }
6166
Ido Schimmela1107482017-05-26 08:37:39 +02006167 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006168 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006169 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6170 if (err)
6171 goto err_fid_port_vid_map;
6172
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006173 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006174 if (err)
6175 goto err_port_vid_learning_set;
6176
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006177 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006178 BR_STATE_FORWARDING);
6179 if (err)
6180 goto err_port_vid_stp_set;
6181
Ido Schimmela1107482017-05-26 08:37:39 +02006182 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006183
Ido Schimmel4724ba562017-03-10 08:53:39 +01006184 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006185
6186err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006187 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006188err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006189 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6190err_fid_port_vid_map:
6191 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006192 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006193}
6194
Ido Schimmela1107482017-05-26 08:37:39 +02006195void
6196mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006197{
Ido Schimmelce95e152017-05-26 08:37:27 +02006198 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006199 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006200 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006201
Ido Schimmela1107482017-05-26 08:37:39 +02006202 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6203 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006204
Ido Schimmela1107482017-05-26 08:37:39 +02006205 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006206 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6207 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006208 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6209 /* If router port holds the last reference on the rFID, then the
6210 * associated Sub-port RIF will be destroyed.
6211 */
6212 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006213}
6214
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006215static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6216 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006217 unsigned long event, u16 vid,
6218 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006219{
6220 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006221 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006222
Ido Schimmelce95e152017-05-26 08:37:27 +02006223 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006224 if (WARN_ON(!mlxsw_sp_port_vlan))
6225 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006226
6227 switch (event) {
6228 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006229 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006230 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006231 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006232 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006233 break;
6234 }
6235
6236 return 0;
6237}
6238
6239static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006240 unsigned long event,
6241 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006242{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006243 if (netif_is_bridge_port(port_dev) ||
6244 netif_is_lag_port(port_dev) ||
6245 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006246 return 0;
6247
David Ahernf8fa9b42017-10-18 09:56:56 -07006248 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6249 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006250}
6251
6252static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6253 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006254 unsigned long event, u16 vid,
6255 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006256{
6257 struct net_device *port_dev;
6258 struct list_head *iter;
6259 int err;
6260
6261 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6262 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006263 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6264 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006265 event, vid,
6266 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006267 if (err)
6268 return err;
6269 }
6270 }
6271
6272 return 0;
6273}
6274
6275static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006276 unsigned long event,
6277 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006278{
6279 if (netif_is_bridge_port(lag_dev))
6280 return 0;
6281
David Ahernf8fa9b42017-10-18 09:56:56 -07006282 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6283 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006284}
6285
Ido Schimmel4724ba562017-03-10 08:53:39 +01006286static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006287 unsigned long event,
6288 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006289{
6290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006291 struct mlxsw_sp_rif_params params = {
6292 .dev = l3_dev,
6293 };
Ido Schimmela1107482017-05-26 08:37:39 +02006294 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006295
6296 switch (event) {
6297 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006298 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006299 if (IS_ERR(rif))
6300 return PTR_ERR(rif);
6301 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006302 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006303 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006304 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006305 break;
6306 }
6307
6308 return 0;
6309}
6310
6311static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006312 unsigned long event,
6313 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006314{
6315 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006316 u16 vid = vlan_dev_vlan_id(vlan_dev);
6317
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006318 if (netif_is_bridge_port(vlan_dev))
6319 return 0;
6320
Ido Schimmel4724ba562017-03-10 08:53:39 +01006321 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006322 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006323 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006324 else if (netif_is_lag_master(real_dev))
6325 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006326 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006327 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006328 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006329
6330 return 0;
6331}
6332
Ido Schimmelb1e45522017-04-30 19:47:14 +03006333static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006334 unsigned long event,
6335 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006336{
6337 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006338 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006339 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006340 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006341 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006342 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006343 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006344 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006345 else
6346 return 0;
6347}
6348
Ido Schimmel4724ba562017-03-10 08:53:39 +01006349int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6350 unsigned long event, void *ptr)
6351{
6352 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6353 struct net_device *dev = ifa->ifa_dev->dev;
6354 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006355 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006356 int err = 0;
6357
David Ahern89d5dd22017-10-18 09:56:55 -07006358 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6359 if (event == NETDEV_UP)
6360 goto out;
6361
6362 mlxsw_sp = mlxsw_sp_lower_get(dev);
6363 if (!mlxsw_sp)
6364 goto out;
6365
6366 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6367 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6368 goto out;
6369
David Ahernf8fa9b42017-10-18 09:56:56 -07006370 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006371out:
6372 return notifier_from_errno(err);
6373}
6374
6375int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6376 unsigned long event, void *ptr)
6377{
6378 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6379 struct net_device *dev = ivi->ivi_dev->dev;
6380 struct mlxsw_sp *mlxsw_sp;
6381 struct mlxsw_sp_rif *rif;
6382 int err = 0;
6383
Ido Schimmel4724ba562017-03-10 08:53:39 +01006384 mlxsw_sp = mlxsw_sp_lower_get(dev);
6385 if (!mlxsw_sp)
6386 goto out;
6387
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006388 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006389 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006390 goto out;
6391
David Ahernf8fa9b42017-10-18 09:56:56 -07006392 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006393out:
6394 return notifier_from_errno(err);
6395}
6396
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006397struct mlxsw_sp_inet6addr_event_work {
6398 struct work_struct work;
6399 struct net_device *dev;
6400 unsigned long event;
6401};
6402
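/* The inet6addr notifier below is called in an atomic context, so the
 * actual RIF update is deferred to this work item, which runs under
 * RTNL.
 */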
6403static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6404{
6405 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6406 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6407 struct net_device *dev = inet6addr_work->dev;
6408 unsigned long event = inet6addr_work->event;
6409 struct mlxsw_sp *mlxsw_sp;
6410 struct mlxsw_sp_rif *rif;
6411
6412 rtnl_lock();
6413 mlxsw_sp = mlxsw_sp_lower_get(dev);
6414 if (!mlxsw_sp)
6415 goto out;
6416
6417 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6418 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6419 goto out;
6420
David Ahernf8fa9b42017-10-18 09:56:56 -07006421 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006422out:
6423 rtnl_unlock();
6424 dev_put(dev);
6425 kfree(inet6addr_work);
6426}
6427
6428/* Called with rcu_read_lock() */
6429int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6430 unsigned long event, void *ptr)
6431{
6432 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6433 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6434 struct net_device *dev = if6->idev->dev;
6435
David Ahern89d5dd22017-10-18 09:56:55 -07006436 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6437 if (event == NETDEV_UP)
6438 return NOTIFY_DONE;
6439
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006440 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6441 return NOTIFY_DONE;
6442
6443 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6444 if (!inet6addr_work)
6445 return NOTIFY_BAD;
6446
6447 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6448 inet6addr_work->dev = dev;
6449 inet6addr_work->event = event;
6450 dev_hold(dev);
6451 mlxsw_core_schedule_work(&inet6addr_work->work);
6452
6453 return NOTIFY_DONE;
6454}
6455
David Ahern89d5dd22017-10-18 09:56:55 -07006456int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6457 unsigned long event, void *ptr)
6458{
6459 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6460 struct net_device *dev = i6vi->i6vi_dev->dev;
6461 struct mlxsw_sp *mlxsw_sp;
6462 struct mlxsw_sp_rif *rif;
6463 int err = 0;
6464
6465 mlxsw_sp = mlxsw_sp_lower_get(dev);
6466 if (!mlxsw_sp)
6467 goto out;
6468
6469 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6470 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6471 goto out;
6472
David Ahernf8fa9b42017-10-18 09:56:56 -07006473 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006474out:
6475 return notifier_from_errno(err);
6476}
6477
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006478static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006479 const char *mac, int mtu)
6480{
6481 char ritr_pl[MLXSW_REG_RITR_LEN];
6482 int err;
6483
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006484 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006485 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6486 if (err)
6487 return err;
6488
6489 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6490 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6491 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6492 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6493}
6494
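/* Update the RIF of a netdev whose MAC address or MTU changed: remove
 * the old FDB entry, re-program the RITR register, install a new FDB
 * entry and propagate the new MTU to the VR's multicast routing table.
 */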
6495int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6496{
6497 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006498 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006499 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006500 int err;
6501
6502 mlxsw_sp = mlxsw_sp_lower_get(dev);
6503 if (!mlxsw_sp)
6504 return 0;
6505
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006506 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6507 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006508 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006509 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006510
Ido Schimmela1107482017-05-26 08:37:39 +02006511 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006512 if (err)
6513 return err;
6514
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006515 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6516 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006517 if (err)
6518 goto err_rif_edit;
6519
Ido Schimmela1107482017-05-26 08:37:39 +02006520 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006521 if (err)
6522 goto err_rif_fdb_op;
6523
Yotam Gigifd890fe2017-09-27 08:23:21 +02006524 if (rif->mtu != dev->mtu) {
6525 struct mlxsw_sp_vr *vr;
6526
6527 /* The RIF is relevant only to its mr_table instance, as unlike
6528 * unicast routing, in multicast routing a RIF cannot be shared
6529 * between several multicast routing tables.
6530 */
6531 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6532 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6533 }
6534
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006535 ether_addr_copy(rif->addr, dev->dev_addr);
6536 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006537
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006538 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006539
6540 return 0;
6541
6542err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006543 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006544err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006545 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006546 return err;
6547}
6548
Ido Schimmelb1e45522017-04-30 19:47:14 +03006549static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006550 struct net_device *l3_dev,
6551 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006552{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006553 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006554
Ido Schimmelb1e45522017-04-30 19:47:14 +03006555 /* If netdev is already associated with a RIF, then we need to
6556 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006557 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006558 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6559 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006560 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006561
David Ahernf8fa9b42017-10-18 09:56:56 -07006562 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006563}
6564
Ido Schimmelb1e45522017-04-30 19:47:14 +03006565static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6566 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006567{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006568 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006569
Ido Schimmelb1e45522017-04-30 19:47:14 +03006570 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6571 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006572 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006573 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006574}
6575
Ido Schimmelb1e45522017-04-30 19:47:14 +03006576int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6577 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006578{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006579 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6580 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006581
Ido Schimmelb1e45522017-04-30 19:47:14 +03006582 if (!mlxsw_sp)
6583 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006584
Ido Schimmelb1e45522017-04-30 19:47:14 +03006585 switch (event) {
6586 case NETDEV_PRECHANGEUPPER:
6587 return 0;
6588 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006589 if (info->linking) {
6590 struct netlink_ext_ack *extack;
6591
6592 extack = netdev_notifier_info_to_extack(&info->info);
6593 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6594 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006595 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006596 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006597 break;
6598 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006599
Ido Schimmelb1e45522017-04-30 19:47:14 +03006600 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006601}
6602
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006603static struct mlxsw_sp_rif_subport *
6604mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006605{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006606 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006607}
6608
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006609static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6610 const struct mlxsw_sp_rif_params *params)
6611{
6612 struct mlxsw_sp_rif_subport *rif_subport;
6613
6614 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6615 rif_subport->vid = params->vid;
6616 rif_subport->lag = params->lag;
6617 if (params->lag)
6618 rif_subport->lag_id = params->lag_id;
6619 else
6620 rif_subport->system_port = params->system_port;
6621}
6622
6623static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6624{
6625 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6626 struct mlxsw_sp_rif_subport *rif_subport;
6627 char ritr_pl[MLXSW_REG_RITR_LEN];
6628
6629 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6630 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006631 rif->rif_index, rif->vr_id, rif->dev->mtu);
6632 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006633 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6634 rif_subport->lag ? rif_subport->lag_id :
6635 rif_subport->system_port,
6636 rif_subport->vid);
6637
6638 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6639}
6640
6641static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6642{
Petr Machata010cadf2017-09-02 23:49:18 +02006643 int err;
6644
6645 err = mlxsw_sp_rif_subport_op(rif, true);
6646 if (err)
6647 return err;
6648
6649 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6650 mlxsw_sp_fid_index(rif->fid), true);
6651 if (err)
6652 goto err_rif_fdb_op;
6653
6654 mlxsw_sp_fid_rif_set(rif->fid, rif);
6655 return 0;
6656
6657err_rif_fdb_op:
6658 mlxsw_sp_rif_subport_op(rif, false);
6659 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006660}
6661
6662static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6663{
Petr Machata010cadf2017-09-02 23:49:18 +02006664 struct mlxsw_sp_fid *fid = rif->fid;
6665
6666 mlxsw_sp_fid_rif_set(fid, NULL);
6667 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6668 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006669 mlxsw_sp_rif_subport_op(rif, false);
6670}
6671
6672static struct mlxsw_sp_fid *
6673mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6674{
6675 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6676}
6677
6678static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6679 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6680 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6681 .setup = mlxsw_sp_rif_subport_setup,
6682 .configure = mlxsw_sp_rif_subport_configure,
6683 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6684 .fid_get = mlxsw_sp_rif_subport_fid_get,
6685};
6686
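/* Common RITR helper for VLAN and FID RIFs: the interface type selects
 * whether vid_fid is interpreted as a VID or as a FID index.
 */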
6687static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6688 enum mlxsw_reg_ritr_if_type type,
6689 u16 vid_fid, bool enable)
6690{
6691 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6692 char ritr_pl[MLXSW_REG_RITR_LEN];
6693
6694 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006695 rif->dev->mtu);
6696 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006697 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6698
6699 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6700}
6701
Yotam Gigib35750f2017-10-09 11:15:33 +02006702u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006703{
6704 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6705}
6706
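/* VLAN RIF configuration: enable the RIF, have the FID flood MC and BC
 * traffic to the router port, install an FDB entry for the netdev's MAC
 * address and bind the FID to the RIF. Errors unwind in reverse order.
 */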
6707static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6708{
6709 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6710 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6711 int err;
6712
6713 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6714 if (err)
6715 return err;
6716
Ido Schimmel0d284812017-07-18 10:10:12 +02006717 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6718 mlxsw_sp_router_port(mlxsw_sp), true);
6719 if (err)
6720 goto err_fid_mc_flood_set;
6721
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006722 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6723 mlxsw_sp_router_port(mlxsw_sp), true);
6724 if (err)
6725 goto err_fid_bc_flood_set;
6726
Petr Machata010cadf2017-09-02 23:49:18 +02006727 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6728 mlxsw_sp_fid_index(rif->fid), true);
6729 if (err)
6730 goto err_rif_fdb_op;
6731
6732 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006733 return 0;
6734
Petr Machata010cadf2017-09-02 23:49:18 +02006735err_rif_fdb_op:
6736 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6737 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006738err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006739 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6740 mlxsw_sp_router_port(mlxsw_sp), false);
6741err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006742 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6743 return err;
6744}
6745
6746static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6747{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006748 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006749 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6750 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006751
Petr Machata010cadf2017-09-02 23:49:18 +02006752 mlxsw_sp_fid_rif_set(fid, NULL);
6753 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6754 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006755 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6756 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006757 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6758 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006759 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6760}
6761
6762static struct mlxsw_sp_fid *
6763mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6764{
6765 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6766
6767 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6768}
6769
6770static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6771 .type = MLXSW_SP_RIF_TYPE_VLAN,
6772 .rif_size = sizeof(struct mlxsw_sp_rif),
6773 .configure = mlxsw_sp_rif_vlan_configure,
6774 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6775 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6776};
6777
6778static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6779{
6780 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6781 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6782 int err;
6783
6784 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6785 true);
6786 if (err)
6787 return err;
6788
Ido Schimmel0d284812017-07-18 10:10:12 +02006789 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6790 mlxsw_sp_router_port(mlxsw_sp), true);
6791 if (err)
6792 goto err_fid_mc_flood_set;
6793
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006794 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6795 mlxsw_sp_router_port(mlxsw_sp), true);
6796 if (err)
6797 goto err_fid_bc_flood_set;
6798
Petr Machata010cadf2017-09-02 23:49:18 +02006799 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6800 mlxsw_sp_fid_index(rif->fid), true);
6801 if (err)
6802 goto err_rif_fdb_op;
6803
6804 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006805 return 0;
6806
Petr Machata010cadf2017-09-02 23:49:18 +02006807err_rif_fdb_op:
6808 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6809 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006810err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006811 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6812 mlxsw_sp_router_port(mlxsw_sp), false);
6813err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006814 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6815 return err;
6816}
6817
6818static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6819{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006820 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006821 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6822 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006823
Petr Machata010cadf2017-09-02 23:49:18 +02006824 mlxsw_sp_fid_rif_set(fid, NULL);
6825 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6826 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006827 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6828 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006829 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6830 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006831 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6832}
6833
6834static struct mlxsw_sp_fid *
6835mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6836{
6837 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6838}
6839
6840static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6841 .type = MLXSW_SP_RIF_TYPE_FID,
6842 .rif_size = sizeof(struct mlxsw_sp_rif),
6843 .configure = mlxsw_sp_rif_fid_configure,
6844 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6845 .fid_get = mlxsw_sp_rif_fid_fid_get,
6846};
6847
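/* Loopback RIFs back IP-in-IP tunnels: the RIF carries the underlay
 * configuration (protocol, local address, GRE key) and is bound to the
 * underlay virtual router when configured.
 */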
Petr Machata6ddb7422017-09-02 23:49:19 +02006848static struct mlxsw_sp_rif_ipip_lb *
6849mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6850{
6851 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6852}
6853
6854static void
6855mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6856 const struct mlxsw_sp_rif_params *params)
6857{
6858 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6859 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6860
6861 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6862 common);
6863 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6864 rif_lb->lb_config = params_lb->lb_config;
6865}
6866
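/* Write a loopback RIF to the device. Only an IPv4 underlay is
 * currently packed into RITR; an IPv6 underlay is rejected with
 * -EAFNOSUPPORT.
 */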
6867static int
6868mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6869 struct mlxsw_sp_vr *ul_vr, bool enable)
6870{
6871 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6872 struct mlxsw_sp_rif *rif = &lb_rif->common;
6873 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6874 char ritr_pl[MLXSW_REG_RITR_LEN];
6875 u32 saddr4;
6876
6877 switch (lb_cf.ul_protocol) {
6878 case MLXSW_SP_L3_PROTO_IPV4:
6879 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6880 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6881 rif->rif_index, rif->vr_id, rif->dev->mtu);
6882 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6883 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6884 ul_vr->id, saddr4, lb_cf.okey);
6885 break;
6886
6887 case MLXSW_SP_L3_PROTO_IPV6:
6888 return -EAFNOSUPPORT;
6889 }
6890
6891 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6892}
6893
6894static int
6895mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6896{
6897 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6898 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6899 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6900 struct mlxsw_sp_vr *ul_vr;
6901 int err;
6902
David Ahernf8fa9b42017-10-18 09:56:56 -07006903 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006904 if (IS_ERR(ul_vr))
6905 return PTR_ERR(ul_vr);
6906
6907 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6908 if (err)
6909 goto err_loopback_op;
6910
6911 lb_rif->ul_vr_id = ul_vr->id;
6912 ++ul_vr->rif_count;
6913 return 0;
6914
6915err_loopback_op:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006916 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
Petr Machata6ddb7422017-09-02 23:49:19 +02006917 return err;
6918}
6919
6920static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6921{
6922 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6923 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6924 struct mlxsw_sp_vr *ul_vr;
6925
6926 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6927 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6928
6929 --ul_vr->rif_count;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006930 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
Petr Machata6ddb7422017-09-02 23:49:19 +02006931}
6932
6933static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6934 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6935 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6936 .setup = mlxsw_sp_rif_ipip_lb_setup,
6937 .configure = mlxsw_sp_rif_ipip_lb_configure,
6938 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6939};
6940
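/* Dispatch table from RIF type to the matching set of operations. */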
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006941static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6942 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6943 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6944 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006945 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006946};
6947
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006948static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6949{
6950 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6951
6952 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6953 sizeof(struct mlxsw_sp_rif *),
6954 GFP_KERNEL);
6955 if (!mlxsw_sp->router->rifs)
6956 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006957
6958 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6959
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006960 return 0;
6961}
6962
6963static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6964{
6965 int i;
6966
6967 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6968 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6969
6970 kfree(mlxsw_sp->router->rifs);
6971}
6972
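/* Set the device's global IP-in-IP tunneling configuration (TIGCR)
 * once, as part of IPIP initialization.
 */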
Petr Machatadcbda282017-10-20 09:16:16 +02006973static int
6974mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6975{
6976 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6977
6978 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6979 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6980}
6981
Petr Machata38ebc0f2017-09-02 23:49:17 +02006982static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6983{
6984 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006985 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006986 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006987}
6988
6989static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6990{
Petr Machata1012b9a2017-09-02 23:49:23 +02006991 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006992}
6993
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006994static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6995{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006996 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006997
6998 /* Flush pending FIB notifications and then flush the device's
6999 * table before requesting another dump. The FIB notification
7000 * block is unregistered, so no need to take RTNL.
7001 */
7002 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02007003 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7004 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007005}
7006
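/* Program the ECMP hash (RECR2) to mirror the kernel's multipath hash
 * policy: the IP addresses are always hashed, and the L4 ports are
 * added unless the relevant sysctl selects an L3-only hash. The seed is
 * randomized at init time. Without CONFIG_IP_ROUTE_MULTIPATH this is a
 * no-op.
 */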
Ido Schimmelaf658b62017-11-02 17:14:09 +01007007#ifdef CONFIG_IP_ROUTE_MULTIPATH
7008static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
7009{
7010 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
7011}
7012
7013static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
7014{
7015 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
7016}
7017
7018static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
7019{
7020 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
7021
7022 mlxsw_sp_mp_hash_header_set(recr2_pl,
7023 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
7024 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
7025 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
7026 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
7027 if (only_l3)
7028 return;
7029 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
7030 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
7031 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
7032 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
7033}
7034
7035static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
7036{
Petr Machata918ee502018-03-11 09:45:47 +02007037 bool only_l3 = !ip6_multipath_hash_policy(&init_net);
David Ahern5e18b9c552018-03-02 08:32:19 -08007038
Ido Schimmelaf658b62017-11-02 17:14:09 +01007039 mlxsw_sp_mp_hash_header_set(recr2_pl,
7040 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
7041 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
7042 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
7043 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
Ido Schimmelaf658b62017-11-02 17:14:09 +01007044 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
David Ahern5e18b9c552018-03-02 08:32:19 -08007045 if (only_l3) {
7046 mlxsw_sp_mp_hash_field_set(recr2_pl,
7047 MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
7048 } else {
7049 mlxsw_sp_mp_hash_header_set(recr2_pl,
7050 MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
7051 mlxsw_sp_mp_hash_field_set(recr2_pl,
7052 MLXSW_REG_RECR2_TCP_UDP_SPORT);
7053 mlxsw_sp_mp_hash_field_set(recr2_pl,
7054 MLXSW_REG_RECR2_TCP_UDP_DPORT);
7055 }
Ido Schimmelaf658b62017-11-02 17:14:09 +01007056}
7057
7058static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7059{
7060 char recr2_pl[MLXSW_REG_RECR2_LEN];
7061 u32 seed;
7062
7063 get_random_bytes(&seed, sizeof(seed));
7064 mlxsw_reg_recr2_pack(recr2_pl, seed);
7065 mlxsw_sp_mp4_hash_init(recr2_pl);
7066 mlxsw_sp_mp6_hash_init(recr2_pl);
7067
7068 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
7069}
7070#else
7071static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7072{
7073 return 0;
7074}
7075#endif
7076
Yuval Mintz48276a22018-01-14 12:33:14 +01007077static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
7078{
7079 char rdpm_pl[MLXSW_REG_RDPM_LEN];
7080 unsigned int i;
7081
7082 MLXSW_REG_ZERO(rdpm, rdpm_pl);
7083
7084 /* HW is determining switch priority based on DSCP-bits, but the
7085 * kernel is still doing that based on the ToS. Since there's a
7086 * mismatch in bits we need to make sure to translate the right
7087 * value ToS would observe, skipping the 2 least-significant ECN bits.
7088 */
7089 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
7090 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
7091
7092 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
7093}
7094
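/* Enable routing in the device via RGCR: IPv4 and IPv6 routing are
 * turned on, the number of router interfaces is capped at the MAX_RIFS
 * resource (which must be exposed by the device) and the USP bit is
 * set.
 */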
Ido Schimmel4724ba562017-03-10 08:53:39 +01007095static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7096{
7097 char rgcr_pl[MLXSW_REG_RGCR_LEN];
7098 u64 max_rifs;
7099 int err;
7100
7101 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
7102 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007103 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007104
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007105 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007106 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
Yuval Mintz48276a22018-01-14 12:33:14 +01007107 mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007108 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
7109 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007110 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007111 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007112}
7113
7114static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7115{
7116 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01007117
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007118 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007119 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007120}
7121
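/* Top-level router initialization. Sub-blocks are brought up in
 * dependency order (RIFs, IPIP, nexthop hash tables, LPM, multicast
 * routing, virtual routers, neighbour handling, the netevent notifier,
 * ECMP hash, DSCP) and the FIB notifier is registered last, once the
 * router is ready to process FIB events. Errors unwind through the
 * labels in reverse order; mlxsw_sp_router_fini() tears everything down
 * the same way.
 */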
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007122int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7123{
Ido Schimmel9011b672017-05-16 19:38:25 +02007124 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007125 int err;
7126
Ido Schimmel9011b672017-05-16 19:38:25 +02007127 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
7128 if (!router)
7129 return -ENOMEM;
7130 mlxsw_sp->router = router;
7131 router->mlxsw_sp = mlxsw_sp;
7132
7133 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007134 err = __mlxsw_sp_router_init(mlxsw_sp);
7135 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02007136 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007137
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007138 err = mlxsw_sp_rifs_init(mlxsw_sp);
7139 if (err)
7140 goto err_rifs_init;
7141
Petr Machata38ebc0f2017-09-02 23:49:17 +02007142 err = mlxsw_sp_ipips_init(mlxsw_sp);
7143 if (err)
7144 goto err_ipips_init;
7145
Ido Schimmel9011b672017-05-16 19:38:25 +02007146 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007147 &mlxsw_sp_nexthop_ht_params);
7148 if (err)
7149 goto err_nexthop_ht_init;
7150
Ido Schimmel9011b672017-05-16 19:38:25 +02007151 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007152 &mlxsw_sp_nexthop_group_ht_params);
7153 if (err)
7154 goto err_nexthop_group_ht_init;
7155
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02007156 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007157 err = mlxsw_sp_lpm_init(mlxsw_sp);
7158 if (err)
7159 goto err_lpm_init;
7160
Yotam Gigid42b0962017-09-27 08:23:20 +02007161 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
7162 if (err)
7163 goto err_mr_init;
7164
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007165 err = mlxsw_sp_vrs_init(mlxsw_sp);
7166 if (err)
7167 goto err_vrs_init;
7168
Ido Schimmel8c9583a2016-10-27 15:12:57 +02007169 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007170 if (err)
7171 goto err_neigh_init;
7172
Ido Schimmel48fac882017-11-02 17:14:06 +01007173 mlxsw_sp->router->netevent_nb.notifier_call =
7174 mlxsw_sp_router_netevent_event;
7175 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7176 if (err)
7177 goto err_register_netevent_notifier;
7178
Ido Schimmelaf658b62017-11-02 17:14:09 +01007179 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
7180 if (err)
7181 goto err_mp_hash_init;
7182
Yuval Mintz48276a22018-01-14 12:33:14 +01007183 err = mlxsw_sp_dscp_init(mlxsw_sp);
7184 if (err)
7185 goto err_dscp_init;
7186
Ido Schimmel7e39d112017-05-16 19:38:28 +02007187 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
7188 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007189 mlxsw_sp_router_fib_dump_flush);
7190 if (err)
7191 goto err_register_fib_notifier;
7192
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007193 return 0;
7194
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007195err_register_fib_notifier:
Yuval Mintz48276a22018-01-14 12:33:14 +01007196err_dscp_init:
Ido Schimmelaf658b62017-11-02 17:14:09 +01007197err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01007198 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7199err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007200 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007201err_neigh_init:
7202 mlxsw_sp_vrs_fini(mlxsw_sp);
7203err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02007204 mlxsw_sp_mr_fini(mlxsw_sp);
7205err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01007206 mlxsw_sp_lpm_fini(mlxsw_sp);
7207err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007208 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007209err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007210 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007211err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02007212 mlxsw_sp_ipips_fini(mlxsw_sp);
7213err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007214 mlxsw_sp_rifs_fini(mlxsw_sp);
7215err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007216 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007217err_router_init:
7218 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007219 return err;
7220}
7221
7222void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7223{
Ido Schimmel7e39d112017-05-16 19:38:28 +02007224 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01007225 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007226 mlxsw_sp_neigh_fini(mlxsw_sp);
7227 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02007228 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007229 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007230 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
7231 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02007232 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007233 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007234 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007235 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007236}