blob: 91170d940268b895f2a183997eab3b6fe7c7f898 [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
Petr Machata6ddb7422017-09-02 23:49:19 +02003 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
Ido Schimmel464dce12016-07-02 11:00:15 +02004 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Petr Machata6ddb7422017-09-02 23:49:19 +02007 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02008 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/kernel.h>
39#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020040#include <linux/rhashtable.h>
41#include <linux/bitops.h>
42#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020043#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010044#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010045#include <linux/netdevice.h>
Ido Schimmel03ea01e2017-05-23 21:56:30 +020046#include <linux/if_bridge.h>
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +020047#include <linux/socket.h>
Ido Schimmel428b8512017-08-03 13:28:28 +020048#include <linux/route.h>
Ido Schimmeleb789982017-10-22 23:11:48 +020049#include <linux/gcd.h>
Ido Schimmelaf658b62017-11-02 17:14:09 +010050#include <linux/random.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020051#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020052#include <net/neighbour.h>
53#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020054#include <net/ip_fib.h>
Ido Schimmel583419f2017-08-03 13:28:27 +020055#include <net/ip6_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010056#include <net/fib_rules.h>
Petr Machata6ddb7422017-09-02 23:49:19 +020057#include <net/ip_tunnels.h>
Ido Schimmel57837882017-03-16 09:08:16 +010058#include <net/l3mdev.h>
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +020059#include <net/addrconf.h>
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +020060#include <net/ndisc.h>
61#include <net/ipv6.h>
Ido Schimmel04b1d4e2017-08-03 13:28:11 +020062#include <net/fib_notifier.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020063
64#include "spectrum.h"
65#include "core.h"
66#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020067#include "spectrum_cnt.h"
68#include "spectrum_dpipe.h"
Petr Machata38ebc0f2017-09-02 23:49:17 +020069#include "spectrum_ipip.h"
Yotam Gigid42b0962017-09-27 08:23:20 +020070#include "spectrum_mr.h"
71#include "spectrum_mr_tcam.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020072#include "spectrum_router.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020073
struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

/* Per-ASIC router state: RIFs, virtual routers, neighbour/nexthop
 * tracking and the LPM tree bookkeeping all hang off this structure.
 */
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;		/* RIF array indexed by rif_index */
	struct mlxsw_sp_vr *vrs;		/* virtual router array */
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;	/* all usable LPM trees */
		unsigned int tree_count;
	} lpm;
	struct {
		/* Periodic neighbour activity dump; interval is adjustable. */
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;		/* set when FIB offload was aborted */
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
107
/* Router interface (RIF): router-side representation of an L3 netdev.
 * Nexthops and neighbour entries that resolve through the device link
 * themselves onto the lists below.
 */
struct mlxsw_sp_rif {
	struct list_head nexthop_list;	/* nexthops using this RIF */
	struct list_head neigh_list;	/* neighbour entries on this RIF */
	struct net_device *dev;		/* backing kernel net device */
	struct mlxsw_sp_fid *fid;	/* FID this RIF is bound to */
	unsigned char addr[ETH_ALEN];	/* interface MAC address */
	int mtu;
	u16 rif_index;			/* index into router->rifs[] / HW RIF table */
	u16 vr_id;			/* owning virtual router */
	const struct mlxsw_sp_rif_ops *ops;	/* type-specific operations */
	struct mlxsw_sp *mlxsw_sp;

	/* Optional RIF packet counters; the *_valid flags record whether
	 * the corresponding counter index was actually allocated.
	 */
	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};
125
/* Parameters describing the RIF to be created for a net device. */
struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;	/* valid when !lag */
		u16 lag_id;		/* valid when lag */
	};
	u16 vid;
	bool lag;		/* selects which union member is meaningful */
};
135
/* Sub-port RIF: a RIF on top of a {port, VID} or {LAG, VID} pair.
 * Mirrors the fields of struct mlxsw_sp_rif_params it was created from.
 */
struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;	/* must be first; container of the base RIF */
	union {
		u16 system_port;	/* valid when !lag */
		u16 lag_id;		/* valid when lag */
	};
	u16 vid;
	bool lag;
};
145
/* Loopback RIF used as the underlay device of an IP-in-IP tunnel. */
struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;	/* must be first */
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

/* Creation parameters for an IPIP loopback RIF. */
struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;	/* must be first */
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};
156
/* Per-RIF-type operations; one instance exists per RIF flavour
 * (sub-port, VLAN, FID, IPIP loopback, ...).
 */
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;	/* size of the type-specific RIF struct */

	/* Copy creation parameters into the freshly allocated RIF. */
	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	/* Program / unprogram the RIF in hardware. */
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	/* Resolve the FID this RIF should be bound to. */
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
167
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100168static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
169static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
170 struct mlxsw_sp_lpm_tree *lpm_tree);
171static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
172 const struct mlxsw_sp_fib *fib,
173 u8 tree_id);
174static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
175 const struct mlxsw_sp_fib *fib);
176
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200177static unsigned int *
178mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
179 enum mlxsw_sp_rif_counter_dir dir)
180{
181 switch (dir) {
182 case MLXSW_SP_RIF_COUNTER_EGRESS:
183 return &rif->counter_egress;
184 case MLXSW_SP_RIF_COUNTER_INGRESS:
185 return &rif->counter_ingress;
186 }
187 return NULL;
188}
189
190static bool
191mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
192 enum mlxsw_sp_rif_counter_dir dir)
193{
194 switch (dir) {
195 case MLXSW_SP_RIF_COUNTER_EGRESS:
196 return rif->counter_egress_valid;
197 case MLXSW_SP_RIF_COUNTER_INGRESS:
198 return rif->counter_ingress_valid;
199 }
200 return false;
201}
202
203static void
204mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
205 enum mlxsw_sp_rif_counter_dir dir,
206 bool valid)
207{
208 switch (dir) {
209 case MLXSW_SP_RIF_COUNTER_EGRESS:
210 rif->counter_egress_valid = valid;
211 break;
212 case MLXSW_SP_RIF_COUNTER_INGRESS:
213 rif->counter_ingress_valid = valid;
214 break;
215 }
216}
217
218static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
219 unsigned int counter_index, bool enable,
220 enum mlxsw_sp_rif_counter_dir dir)
221{
222 char ritr_pl[MLXSW_REG_RITR_LEN];
223 bool is_egress = false;
224 int err;
225
226 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
227 is_egress = true;
228 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
229 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
230 if (err)
231 return err;
232
233 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
234 is_egress);
235 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
236}
237
238int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
239 struct mlxsw_sp_rif *rif,
240 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
241{
242 char ricnt_pl[MLXSW_REG_RICNT_LEN];
243 unsigned int *p_counter_index;
244 bool valid;
245 int err;
246
247 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
248 if (!valid)
249 return -EINVAL;
250
251 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
252 if (!p_counter_index)
253 return -EINVAL;
254 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
255 MLXSW_REG_RICNT_OPCODE_NOP);
256 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
257 if (err)
258 return err;
259 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
260 return 0;
261}
262
263static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
264 unsigned int counter_index)
265{
266 char ricnt_pl[MLXSW_REG_RICNT_LEN];
267
268 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
269 MLXSW_REG_RICNT_OPCODE_CLEAR);
270 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
271}
272
/* Allocate a flow counter for @rif in direction @dir, clear it and bind
 * it to the RIF in hardware. On success the direction is marked valid;
 * on failure the counter is returned to the pool.
 *
 * Returns 0 on success, -EINVAL for an unknown direction, or the error
 * from counter allocation / register access.
 */
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	/* Clear stale values before enabling the counter on the RIF. */
	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	/* Both failure paths only need the counter returned to the pool. */
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
305
/* Unbind and release the RIF counter for direction @dir, if one was
 * allocated. Safe to call when no counter exists (silently returns).
 */
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	/* Cannot happen once valid_get() returned true; guard anyway. */
	if (WARN_ON(!p_counter_index))
		return;
	/* Disable counting on the RIF before freeing the counter index. */
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
324
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200325static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
326{
327 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
328 struct devlink *devlink;
329
330 devlink = priv_to_devlink(mlxsw_sp->core);
331 if (!devlink_dpipe_table_counter_enabled(devlink,
332 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
333 return;
334 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
335}
336
337static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
338{
339 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
340
341 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
342}
343
Ido Schimmel4724ba562017-03-10 08:53:39 +0100344static struct mlxsw_sp_rif *
345mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
346 const struct net_device *dev);
347
/* Number of distinct prefix lengths: 0..128 (IPv6 covers IPv4 too). */
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

/* Bitmap of prefix lengths in use; bit N set means /N routes exist. */
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

/* Iterate over every prefix length set in @prefix_usage. */
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
356
357static bool
358mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
359 struct mlxsw_sp_prefix_usage *prefix_usage2)
360{
361 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
362}
363
Jiri Pirko6b75c482016-07-04 08:23:09 +0200364static void
365mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
366 struct mlxsw_sp_prefix_usage *prefix_usage2)
367{
368 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
369}
370
371static void
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200372mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
373 unsigned char prefix_len)
374{
375 set_bit(prefix_len, prefix_usage->b);
376}
377
378static void
379mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
380 unsigned char prefix_len)
381{
382 clear_bit(prefix_len, prefix_usage->b);
383}
384
/* Lookup key of a FIB node: address (big enough for IPv6, so IPv4 fits
 * too) plus prefix length.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,	/* forwarded via nexthop group */
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,	/* delivered to the local host */
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,	/* punted to the CPU */

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};
403
struct mlxsw_sp_nexthop_group;

/* A prefix in a FIB: owns the list of entries sharing that prefix. */
struct mlxsw_sp_fib_node {
	struct list_head entry_list;	/* entries for this prefix */
	struct list_head list;		/* node on fib->node_list */
	struct rhash_head ht_node;	/* node in fib->ht, keyed by @key */
	struct mlxsw_sp_fib *fib;	/* owning FIB */
	struct mlxsw_sp_fib_key key;
};

/* Decapsulation state of an IPIP_DECAP entry. */
struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

/* A single route under a FIB node. */
struct mlxsw_sp_fib_entry {
	struct list_head list;		/* entry on fib_node->entry_list */
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};
427
/* IPv4 route: base entry plus the kernel route attributes needed to
 * match it back to the corresponding fib_info.
 */
struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;	/* must be first */
	u32 tb_id;	/* kernel table ID */
	u32 prio;
	u8 tos;
	u8 type;
};

/* IPv6 route: base entry plus the list of kernel rt6_info it mirrors. */
struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;	/* must be first */
	struct list_head rt6_list;	/* list of struct mlxsw_sp_rt6 */
	unsigned int nrt6;		/* number of entries on rt6_list */
};

/* Wrapper linking one kernel rt6_info into a fib6 entry. */
struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};
446
/* Hardware LPM tree shared by FIBs with identical prefix usage. */
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	/* Number of routes per prefix length using this tree. */
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

/* One FIB: the routes of a single protocol within a virtual router. */
struct mlxsw_sp_fib {
	struct rhashtable ht;		/* fib nodes keyed by {addr, plen} */
	struct list_head node_list;	/* all fib nodes, in insertion order */
	struct mlxsw_sp_vr *vr;		/* owning virtual router */
	struct mlxsw_sp_lpm_tree *lpm_tree;	/* bound LPM tree (referenced) */
	enum mlxsw_sp_l3proto proto;
};

/* Virtual router: one kernel table mapped into hardware. */
struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;		/* RIFs bound to this VR */
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;	/* IPv4 multicast routes */
};
471
Ido Schimmel9aecce12017-02-09 10:28:42 +0100472static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200473
/* Create a FIB for protocol @proto inside @vr: initialize the node hash
 * table, take a reference on the protocol's default LPM tree and bind
 * the VR to it. Returns the FIB or an ERR_PTR on failure.
 */
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	/* New FIBs start out on the shared per-protocol default tree. */
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
505
/* Tear down a FIB: unbind the VR from its LPM tree, drop the tree
 * reference and free the (expected-empty) node table.
 */
static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	/* Unbind before releasing the reference that may destroy the tree. */
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
515
Jiri Pirko53342022016-07-04 08:23:08 +0200516static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100517mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200518{
519 static struct mlxsw_sp_lpm_tree *lpm_tree;
520 int i;
521
Ido Schimmel9011b672017-05-16 19:38:25 +0200522 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
523 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100524 if (lpm_tree->ref_count == 0)
525 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200526 }
527 return NULL;
528}
529
530static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
531 struct mlxsw_sp_lpm_tree *lpm_tree)
532{
533 char ralta_pl[MLXSW_REG_RALTA_LEN];
534
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200535 mlxsw_reg_ralta_pack(ralta_pl, true,
536 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
537 lpm_tree->id);
Jiri Pirko53342022016-07-04 08:23:08 +0200538 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
539}
540
Ido Schimmelcc702672017-08-14 10:54:03 +0200541static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
542 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200543{
544 char ralta_pl[MLXSW_REG_RALTA_LEN];
545
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200546 mlxsw_reg_ralta_pack(ralta_pl, false,
547 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
548 lpm_tree->id);
Ido Schimmelcc702672017-08-14 10:54:03 +0200549 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
Jiri Pirko53342022016-07-04 08:23:08 +0200550}
551
/* Program the left-linked structure of an LPM tree via RALST: the root
 * bin is the longest used prefix, and every other used prefix is linked
 * to the previously written one, forming a chain ordered by length.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* The iteration runs shortest-to-longest, so root_bin ends up
	 * holding the longest prefix length in use.
	 */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
575
/* Claim an unused LPM tree slot, allocate it in hardware and program
 * its structure from @prefix_usage. The returned tree starts with
 * ref_count == 1. Returns ERR_PTR(-EBUSY) when no slot is free, or the
 * hardware error otherwise.
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	/* Record the usage the tree was built for and reset per-prefix
	 * route counts for the new user.
	 */
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
607
/* Destroy a tree whose last reference was dropped; currently this only
 * needs to release the hardware allocation.
 */
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
613
614static struct mlxsw_sp_lpm_tree *
615mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
616 struct mlxsw_sp_prefix_usage *prefix_usage,
Ido Schimmel382dbb42017-03-10 08:53:40 +0100617 enum mlxsw_sp_l3proto proto)
Jiri Pirko53342022016-07-04 08:23:08 +0200618{
619 struct mlxsw_sp_lpm_tree *lpm_tree;
620 int i;
621
Ido Schimmel9011b672017-05-16 19:38:25 +0200622 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
623 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Jiri Pirko8b99bec2016-10-25 11:25:57 +0200624 if (lpm_tree->ref_count != 0 &&
625 lpm_tree->proto == proto &&
Jiri Pirko53342022016-07-04 08:23:08 +0200626 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100627 prefix_usage)) {
628 mlxsw_sp_lpm_tree_hold(lpm_tree);
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200629 return lpm_tree;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100630 }
Jiri Pirko53342022016-07-04 08:23:08 +0200631 }
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200632 return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
633}
Jiri Pirko53342022016-07-04 08:23:08 +0200634
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200635static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
636{
Jiri Pirko53342022016-07-04 08:23:08 +0200637 lpm_tree->ref_count++;
Jiri Pirko53342022016-07-04 08:23:08 +0200638}
639
Ido Schimmelcc702672017-08-14 10:54:03 +0200640static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
641 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200642{
643 if (--lpm_tree->ref_count == 0)
Ido Schimmelcc702672017-08-14 10:54:03 +0200644 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
Jiri Pirko53342022016-07-04 08:23:08 +0200645}
646
Ido Schimmeld7a60302017-06-08 08:47:43 +0200647#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
Ido Schimmel8494ab02017-03-24 08:02:47 +0100648
/* Initialize LPM state: size the tree array from device resources and
 * create one default (empty-usage) tree per protocol, to which new
 * virtual routers are bound until they have routes of their own.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound.
 */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	/* Tree IDs below MLXSW_SP_LPM_TREE_MIN are reserved. */
	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}
697
698static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
699{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100700 struct mlxsw_sp_lpm_tree *lpm_tree;
701
702 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
703 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
704
705 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
706 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
707
Ido Schimmel9011b672017-05-16 19:38:25 +0200708 kfree(mlxsw_sp->router->lpm.trees);
Jiri Pirko53342022016-07-04 08:23:08 +0200709}
710
Ido Schimmel76610eb2017-03-10 08:53:41 +0100711static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
712{
Yotam Gigid42b0962017-09-27 08:23:20 +0200713 return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100714}
715
Jiri Pirko6b75c482016-07-04 08:23:09 +0200716static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
717{
718 struct mlxsw_sp_vr *vr;
719 int i;
720
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200721 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200722 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100723 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200724 return vr;
725 }
726 return NULL;
727}
728
729static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0adb2142017-08-14 10:54:04 +0200730 const struct mlxsw_sp_fib *fib, u8 tree_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200731{
732 char raltb_pl[MLXSW_REG_RALTB_LEN];
733
Ido Schimmel76610eb2017-03-10 08:53:41 +0100734 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
735 (enum mlxsw_reg_ralxx_protocol) fib->proto,
Ido Schimmel0adb2142017-08-14 10:54:04 +0200736 tree_id);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200737 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
738}
739
740static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100741 const struct mlxsw_sp_fib *fib)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200742{
743 char raltb_pl[MLXSW_REG_RALTB_LEN];
744
745 /* Bind to tree 0 which is default */
Ido Schimmel76610eb2017-03-10 08:53:41 +0100746 mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
747 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200748 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
749}
750
751static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
752{
Yotam Gigi7e50d432017-09-27 08:23:19 +0200753 /* For our purpose, squash main, default and local tables into one */
754 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200755 tb_id = RT_TABLE_MAIN;
756 return tb_id;
757}
758
759static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100760 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200761{
762 struct mlxsw_sp_vr *vr;
763 int i;
764
765 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200766
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200767 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200768 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100769 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200770 return vr;
771 }
772 return NULL;
773}
774
Ido Schimmel76610eb2017-03-10 08:53:41 +0100775static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
776 enum mlxsw_sp_l3proto proto)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200777{
Ido Schimmel76610eb2017-03-10 08:53:41 +0100778 switch (proto) {
779 case MLXSW_SP_L3_PROTO_IPV4:
780 return vr->fib4;
781 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200782 return vr->fib6;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100783 }
784 return NULL;
785}
786
/* Claim an unused virtual router for kernel table @tb_id and create its
 * IPv4 FIB, IPv6 FIB and IPv4 multicast routing table. Returns the VR on
 * success, an ERR_PTR() otherwise. Resources are unwound in reverse order
 * on failure.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	/* Nothing is allocated before fib4, so its failure needs no unwind. */
	vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	/* Assign tb_id last, once the VR is fully constructed. */
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}
824
/* Tear down a virtual router's tables in reverse order of their creation
 * in mlxsw_sp_vr_create(): MR table first, then the IPv6 and IPv4 FIBs.
 * The pointers are cleared as each table goes away.
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}
835
David Ahernf8fa9b42017-10-18 09:56:56 -0700836static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
837 struct netlink_ext_ack *extack)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200838{
839 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200840
841 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100842 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
843 if (!vr)
David Ahernf8fa9b42017-10-18 09:56:56 -0700844 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200845 return vr;
846}
847
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100848static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200849{
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200850 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
Yotam Gigid42b0962017-09-27 08:23:20 +0200851 list_empty(&vr->fib6->node_list) &&
852 mlxsw_sp_mr_table_empty(vr->mr4_table))
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100853 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200854}
855
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200856static bool
857mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
858 enum mlxsw_sp_l3proto proto, u8 tree_id)
859{
860 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
861
862 if (!mlxsw_sp_vr_is_used(vr))
863 return false;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100864 if (fib->lpm_tree->id == tree_id)
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200865 return true;
866 return false;
867}
868
/* Switch @fib over to @new_tree: take a reference on the new tree, bind
 * the FIB's virtual router to it in hardware, then drop the reference on
 * the old tree. If the bind fails, the FIB is restored to the old tree.
 */
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}
889
890static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
891 struct mlxsw_sp_fib *fib,
892 struct mlxsw_sp_lpm_tree *new_tree)
893{
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200894 enum mlxsw_sp_l3proto proto = fib->proto;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100895 struct mlxsw_sp_lpm_tree *old_tree;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200896 u8 old_id, new_id = new_tree->id;
897 struct mlxsw_sp_vr *vr;
898 int i, err;
899
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100900 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200901 old_id = old_tree->id;
902
903 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
904 vr = &mlxsw_sp->router->vrs[i];
905 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
906 continue;
907 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
908 mlxsw_sp_vr_fib(vr, proto),
909 new_tree);
910 if (err)
911 goto err_tree_replace;
912 }
913
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100914 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
915 sizeof(new_tree->prefix_ref_count));
916 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
917 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
918
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200919 return 0;
920
921err_tree_replace:
922 for (i--; i >= 0; i--) {
923 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
924 continue;
925 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
926 mlxsw_sp_vr_fib(vr, proto),
927 old_tree);
928 }
929 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200930}
931
Nogah Frankel9497c042016-09-20 11:16:54 +0200932static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200933{
934 struct mlxsw_sp_vr *vr;
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200935 u64 max_vrs;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200936 int i;
937
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200938 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
Nogah Frankel9497c042016-09-20 11:16:54 +0200939 return -EIO;
940
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200941 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
Ido Schimmel9011b672017-05-16 19:38:25 +0200942 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
943 GFP_KERNEL);
944 if (!mlxsw_sp->router->vrs)
Nogah Frankel9497c042016-09-20 11:16:54 +0200945 return -ENOMEM;
946
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200947 for (i = 0; i < max_vrs; i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200948 vr = &mlxsw_sp->router->vrs[i];
Jiri Pirko6b75c482016-07-04 08:23:09 +0200949 vr->id = i;
950 }
Nogah Frankel9497c042016-09-20 11:16:54 +0200951
952 return 0;
953}
954
Ido Schimmelac571de2016-11-14 11:26:32 +0100955static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
956
/* Counterpart of mlxsw_sp_vrs_init(): flush pending FIB work and free the
 * virtual router array.
 */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
970
Petr Machata6ddb7422017-09-02 23:49:19 +0200971static struct net_device *
972__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
973{
974 struct ip_tunnel *tun = netdev_priv(ol_dev);
975 struct net *net = dev_net(ol_dev);
976
977 return __dev_get_by_index(net, tun->parms.link);
978}
979
Petr Machata4cf04f32017-11-03 10:03:42 +0100980u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
Petr Machata6ddb7422017-09-02 23:49:19 +0200981{
982 struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
983
984 if (d)
985 return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
986 else
987 return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
988}
989
Petr Machata1012b9a2017-09-02 23:49:23 +0200990static struct mlxsw_sp_rif *
991mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -0700992 const struct mlxsw_sp_rif_params *params,
993 struct netlink_ext_ack *extack);
Petr Machata1012b9a2017-09-02 23:49:23 +0200994
995static struct mlxsw_sp_rif_ipip_lb *
996mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
997 enum mlxsw_sp_ipip_type ipipt,
Petr Machata7e75af62017-11-03 10:03:36 +0100998 struct net_device *ol_dev,
999 struct netlink_ext_ack *extack)
Petr Machata1012b9a2017-09-02 23:49:23 +02001000{
1001 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1002 const struct mlxsw_sp_ipip_ops *ipip_ops;
1003 struct mlxsw_sp_rif *rif;
1004
1005 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1006 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1007 .common.dev = ol_dev,
1008 .common.lag = false,
1009 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1010 };
1011
Petr Machata7e75af62017-11-03 10:03:36 +01001012 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
Petr Machata1012b9a2017-09-02 23:49:23 +02001013 if (IS_ERR(rif))
1014 return ERR_CAST(rif);
1015 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1016}
1017
/* Allocate an IPIP entry for tunnel device @ol_dev of type @ipipt and
 * create its loopback RIF. Returns the entry or an ERR_PTR().
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	/* No extack is passed here, so RIF creation failures are silent. */
	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	/* Snapshot the tunnel parameters at creation time. */
	ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}
1047
/* Counterpart of mlxsw_sp_ipip_entry_alloc(): destroy the entry's
 * loopback RIF, then free the entry itself.
 */
static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}
1054
Petr Machata1012b9a2017-09-02 23:49:23 +02001055static bool
1056mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1057 const enum mlxsw_sp_l3proto ul_proto,
1058 union mlxsw_sp_l3addr saddr,
1059 u32 ul_tb_id,
1060 struct mlxsw_sp_ipip_entry *ipip_entry)
1061{
1062 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1063 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1064 union mlxsw_sp_l3addr tun_saddr;
1065
1066 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1067 return false;
1068
1069 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1070 return tun_ul_tb_id == ul_tb_id &&
1071 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1072}
1073
/* Allocate a KVD linear entry to serve as the tunnel index and link
 * @fib_entry and @ipip_entry to each other, making @fib_entry the
 * tunnel's decap route. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}
1091
/* Counterpart of mlxsw_sp_fib_entry_decap_init(): sever the links between
 * the FIB entry and its IPIP entry and release the KVD linear entry.
 */
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}
1100
Petr Machata1cc38fb2017-09-02 23:49:26 +02001101static struct mlxsw_sp_fib_node *
1102mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1103 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001104static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1105 struct mlxsw_sp_fib_entry *fib_entry);
1106
/* Stop offloading decap on @ipip_entry's decap route: unlink it from the
 * IPIP entry, downgrade it back to a trap entry and push the change to
 * hardware.
 */
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	/* Unlink first, then change the type, then update in HW. */
	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
1118
/* Start offloading decap through @decap_fib_entry: link it to
 * @ipip_entry, mark it as an IPIP decap entry and push the change to
 * hardware. If the hardware update fails, the route is demoted back to a
 * trap entry.
 */
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
1132
1133/* Given an IPIP entry, find the corresponding decap route. */
1134static struct mlxsw_sp_fib_entry *
1135mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1136 struct mlxsw_sp_ipip_entry *ipip_entry)
1137{
1138 static struct mlxsw_sp_fib_node *fib_node;
1139 const struct mlxsw_sp_ipip_ops *ipip_ops;
1140 struct mlxsw_sp_fib_entry *fib_entry;
1141 unsigned char saddr_prefix_len;
1142 union mlxsw_sp_l3addr saddr;
1143 struct mlxsw_sp_fib *ul_fib;
1144 struct mlxsw_sp_vr *ul_vr;
1145 const void *saddrp;
1146 size_t saddr_len;
1147 u32 ul_tb_id;
1148 u32 saddr4;
1149
1150 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1151
1152 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1153 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1154 if (!ul_vr)
1155 return NULL;
1156
1157 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1158 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1159 ipip_entry->ol_dev);
1160
1161 switch (ipip_ops->ul_proto) {
1162 case MLXSW_SP_L3_PROTO_IPV4:
1163 saddr4 = be32_to_cpu(saddr.addr4);
1164 saddrp = &saddr4;
1165 saddr_len = 4;
1166 saddr_prefix_len = 32;
1167 break;
1168 case MLXSW_SP_L3_PROTO_IPV6:
1169 WARN_ON(1);
1170 return NULL;
1171 }
1172
1173 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1174 saddr_prefix_len);
1175 if (!fib_node || list_empty(&fib_node->entry_list))
1176 return NULL;
1177
1178 fib_entry = list_first_entry(&fib_node->entry_list,
1179 struct mlxsw_sp_fib_entry, list);
1180 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1181 return NULL;
1182
1183 return fib_entry;
1184}
1185
Petr Machata1012b9a2017-09-02 23:49:23 +02001186static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001187mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1188 enum mlxsw_sp_ipip_type ipipt,
1189 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001190{
Petr Machata1012b9a2017-09-02 23:49:23 +02001191 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02001192
1193 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1194 if (IS_ERR(ipip_entry))
1195 return ipip_entry;
1196
1197 list_add_tail(&ipip_entry->ipip_list_node,
1198 &mlxsw_sp->router->ipip_list);
1199
Petr Machata1012b9a2017-09-02 23:49:23 +02001200 return ipip_entry;
1201}
1202
/* Counterpart of mlxsw_sp_ipip_entry_create(): take the entry off the
 * router's IPIP list, then release it.
 */
static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}
1210
Petr Machata4607f6d2017-09-02 23:49:25 +02001211static bool
1212mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1213 const struct net_device *ul_dev,
1214 enum mlxsw_sp_l3proto ul_proto,
1215 union mlxsw_sp_l3addr ul_dip,
1216 struct mlxsw_sp_ipip_entry *ipip_entry)
1217{
1218 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1219 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1220 struct net_device *ipip_ul_dev;
1221
1222 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1223 return false;
1224
1225 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1226 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1227 ul_tb_id, ipip_entry) &&
1228 (!ipip_ul_dev || ipip_ul_dev == ul_dev);
1229}
1230
1231/* Given decap parameters, find the corresponding IPIP entry. */
1232static struct mlxsw_sp_ipip_entry *
1233mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1234 const struct net_device *ul_dev,
1235 enum mlxsw_sp_l3proto ul_proto,
1236 union mlxsw_sp_l3addr ul_dip)
1237{
1238 struct mlxsw_sp_ipip_entry *ipip_entry;
1239
1240 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1241 ipip_list_node)
1242 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1243 ul_proto, ul_dip,
1244 ipip_entry))
1245 return ipip_entry;
1246
1247 return NULL;
1248}
1249
Petr Machata6698c162017-10-16 16:26:36 +02001250static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1251 const struct net_device *dev,
1252 enum mlxsw_sp_ipip_type *p_type)
1253{
1254 struct mlxsw_sp_router *router = mlxsw_sp->router;
1255 const struct mlxsw_sp_ipip_ops *ipip_ops;
1256 enum mlxsw_sp_ipip_type ipipt;
1257
1258 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1259 ipip_ops = router->ipip_ops_arr[ipipt];
1260 if (dev->type == ipip_ops->dev_type) {
1261 if (p_type)
1262 *p_type = ipipt;
1263 return true;
1264 }
1265 }
1266 return false;
1267}
1268
/* Return true if @dev is of a netdevice type that the driver recognizes
 * as an IPIP overlay (tunnel) device.
 */
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
1274
1275static struct mlxsw_sp_ipip_entry *
1276mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1277 const struct net_device *ol_dev)
1278{
1279 struct mlxsw_sp_ipip_entry *ipip_entry;
1280
1281 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1282 ipip_list_node)
1283 if (ipip_entry->ol_dev == ol_dev)
1284 return ipip_entry;
1285
1286 return NULL;
1287}
1288
/* Find the next IPIP entry after @start whose underlay device is @ul_dev.
 * Pass start == NULL to search from the beginning of the list.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	/* list_prepare_entry() makes the _continue iteration below begin
	 * right after @start, or at the list head when @start is NULL.
	 */
	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}
1309
1310bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1311 const struct net_device *dev)
1312{
1313 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1314}
1315
Petr Machatacafdb2a2017-11-03 10:03:30 +01001316static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1317 const struct net_device *ol_dev,
1318 enum mlxsw_sp_ipip_type ipipt)
1319{
1320 const struct mlxsw_sp_ipip_ops *ops
1321 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1322
1323 /* For deciding whether decap should be offloaded, we don't care about
1324 * overlay protocol, so ask whether either one is supported.
1325 */
1326 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1327 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1328}
1329
/* Handle registration of a tunnel netdevice: if the tunnel can be
 * offloaded, create an IPIP entry for it — unless another tunnel with the
 * same local address exists in the same underlay table, in which case the
 * conflicting tunnel is demoted and this one is left to the slow path.
 */
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}
1356
/* Handle unregistration of a tunnel netdevice: destroy the IPIP entry,
 * if the tunnel was offloaded.
 */
static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
1366
/* The tunnel device came up: if a matching decap route exists, promote
 * it so that decapsulation is offloaded.
 */
static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry =
		mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);

	if (!decap_fib_entry)
		return;
	mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
					  decap_fib_entry);
}
1378
/* Netdevice-event wrapper: forward an "up" event on @ol_dev to its IPIP
 * entry, if the tunnel is offloaded.
 */
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
1388
Petr Machataa3fe1982017-11-03 10:03:33 +01001389static void
1390mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1391 struct mlxsw_sp_ipip_entry *ipip_entry)
1392{
1393 if (ipip_entry->decap_fib_entry)
1394 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1395}
1396
/* Netdevice-event wrapper: forward a "down" event on @ol_dev to its IPIP
 * entry, if the tunnel is offloaded.
 */
static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
1406
Petr Machata09dbf622017-11-28 13:17:14 +01001407static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1408 struct mlxsw_sp_rif *old_rif,
1409 struct mlxsw_sp_rif *new_rif);
/* Recreate the loopback RIF of an IPIP entry. RIFs cannot be edited in
 * place, so a new RIF is created first, next hops are optionally migrated
 * over to it (@keep_encap), and only then is the old RIF destroyed.
 * Returns 0 or a negative errno; on failure the old RIF stays in place.
 */
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	/* Move encapsulation next hops from the old RIF to the new one. */
	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}
1435
Petr Machata09dbf622017-11-28 13:17:14 +01001436static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1437 struct mlxsw_sp_rif *rif);
1438
/* Update the offloads related to an IPIP entry. The decap route is always
 * refreshed; in addition:
 * - @recreate_loopback: recreate the associated loopback RIF.
 * - @keep_encap: migrate next hops that use the tunnel netdevice to the
 *   new RIF. Only relevant when @recreate_loopback is true.
 * - @update_nexthops: refresh next hops while keeping the current
 *   loopback RIF. Only relevant when @recreate_loopback is false.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	/* Re-promote the decap route if the tunnel is administratively up. */
	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
1481
/* Handle a VRF change of the overlay device: if the move creates a local
 * address conflict with another offloaded tunnel, demote this tunnel;
 * otherwise recreate the loopback RIF in the new VRF.
 */
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	if (!ipip_entry)
		return 0;

	/* For flat configuration cases, moving overlay to a different VRF might
	 * cause local address conflict, and the conflicting tunnels need to be
	 * demoted.
	 */
	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}
1512
/* Handle a VRF change of the underlay device: recreate the loopback RIF
 * while migrating the encapsulation next hops to it.
 */
static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     struct netlink_ext_ack *extack)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}
1522
/* Handle the underlay device coming up: refresh next hops while keeping
 * the current loopback RIF.
 */
static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
1531
/* Handle the underlay device going down. */
static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
1544
1545static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001546mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1547 struct net_device *ol_dev,
1548 struct netlink_ext_ack *extack)
1549{
1550 const struct mlxsw_sp_ipip_ops *ipip_ops;
1551 struct mlxsw_sp_ipip_entry *ipip_entry;
1552 int err;
1553
1554 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1555 if (!ipip_entry)
1556 /* A change might make a tunnel eligible for offloading, but
1557 * that is currently not implemented. What falls to slow path
1558 * stays there.
1559 */
1560 return 0;
1561
1562 /* A change might make a tunnel not eligible for offloading. */
1563 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1564 ipip_entry->ipipt)) {
1565 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1566 return 0;
1567 }
1568
1569 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1570 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1571 return err;
1572}
1573
Petr Machataaf641712017-11-03 10:03:40 +01001574void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1575 struct mlxsw_sp_ipip_entry *ipip_entry)
1576{
1577 struct net_device *ol_dev = ipip_entry->ol_dev;
1578
1579 if (ol_dev->flags & IFF_UP)
1580 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1581 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1582}
1583
/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
1590bool
1591mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1592 enum mlxsw_sp_l3proto ul_proto,
1593 union mlxsw_sp_l3addr saddr,
1594 u32 ul_tb_id,
1595 const struct mlxsw_sp_ipip_entry *except)
1596{
1597 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1598
1599 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1600 ipip_list_node) {
1601 if (ipip_entry != except &&
1602 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1603 ul_tb_id, ipip_entry)) {
1604 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1605 return true;
1606 }
1607 }
1608
1609 return false;
1610}
1611
Petr Machata61481f22017-11-03 10:03:41 +01001612static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1613 struct net_device *ul_dev)
1614{
1615 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1616
1617 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1618 ipip_list_node) {
1619 struct net_device *ipip_ul_dev =
1620 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1621
1622 if (ipip_ul_dev == ul_dev)
1623 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1624 }
1625}
1626
/* Dispatch a netdev notifier event observed on an IPIP overlay (tunnel)
 * device to the per-event handlers. Returns 0 or a negative errno from
 * the REGISTER / CHANGEUPPER / CHANGE handlers; other events cannot
 * fail.
 */
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only a move to/from an L3 master (VRF) is of interest. */
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								    ol_dev,
								    extack);
		return 0;
	case NETDEV_CHANGE:
		extack = info->extack;
		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							       ol_dev, extack);
	}
	return 0;
}
1662
/* Dispatch a netdev event seen on the underlay device of one specific
 * offloaded tunnel. Events other than CHANGEUPPER / UP / DOWN are
 * ignored and return 0.
 */
static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only a move to/from an L3 master (VRF) matters here. */
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}
1694
/* Handle a netdev event on a device that serves as underlay for one or
 * more offloaded tunnels. Each affected tunnel is updated in turn; if
 * any update fails, all tunnels over this underlay device are demoted
 * and the error is returned.
 */
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	/* NULL seeds the search; each iteration resumes from the entry
	 * returned by the previous one.
	 */
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}
	}

	return 0;
}
1718
/* Hash key for router->neigh_ht: the kernel neighbour pointer itself. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
1722
/* Driver-side mirror of a kernel neighbour that is (or may be)
 * programmed into the device's host (RAUHT) table.
 */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* membership in rif->neigh_list */
	struct rhash_head ht_node;	/* membership in router->neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif;			/* RIF index of the neighbour's netdev */
	bool connected;			/* last state written to the device */
	unsigned char ha[ETH_ALEN];	/* cached hardware (MAC) address */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	/* Optional flow counter; counter_index is only meaningful while
	 * counter_valid is set.
	 */
	unsigned int counter_index;
	bool counter_valid;
};
1737
/* rhashtable layout for router->neigh_ht: entries are hashed by the
 * full mlxsw_sp_neigh_key (i.e. the neighbour pointer).
 */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
1743
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001744struct mlxsw_sp_neigh_entry *
1745mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1746 struct mlxsw_sp_neigh_entry *neigh_entry)
1747{
1748 if (!neigh_entry) {
1749 if (list_empty(&rif->neigh_list))
1750 return NULL;
1751 else
1752 return list_first_entry(&rif->neigh_list,
1753 typeof(*neigh_entry),
1754 rif_list_node);
1755 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001756 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001757 return NULL;
1758 return list_next_entry(neigh_entry, rif_list_node);
1759}
1760
1761int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1762{
1763 return neigh_entry->key.n->tbl->family;
1764}
1765
1766unsigned char *
1767mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1768{
1769 return neigh_entry->ha;
1770}
1771
1772u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1773{
1774 struct neighbour *n;
1775
1776 n = neigh_entry->key.n;
1777 return ntohl(*((__be32 *) n->primary_key));
1778}
1779
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001780struct in6_addr *
1781mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1782{
1783 struct neighbour *n;
1784
1785 n = neigh_entry->key.n;
1786 return (struct in6_addr *) &n->primary_key;
1787}
1788
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001789int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1790 struct mlxsw_sp_neigh_entry *neigh_entry,
1791 u64 *p_counter)
1792{
1793 if (!neigh_entry->counter_valid)
1794 return -EINVAL;
1795
1796 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1797 p_counter, NULL);
1798}
1799
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001800static struct mlxsw_sp_neigh_entry *
1801mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1802 u16 rif)
1803{
1804 struct mlxsw_sp_neigh_entry *neigh_entry;
1805
1806 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1807 if (!neigh_entry)
1808 return NULL;
1809
1810 neigh_entry->key.n = n;
1811 neigh_entry->rif = rif;
1812 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1813
1814 return neigh_entry;
1815}
1816
/* Free an entry allocated by mlxsw_sp_neigh_entry_alloc(). */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
1821
/* Insert the entry into the router's neighbour hash table. Returns the
 * rhashtable error code (0 on success).
 */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
1830
/* Remove the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
1839
/* Decide whether a flow counter should be bound to this neighbour:
 * only when counters are enabled on the matching devlink dpipe host
 * table (host4 for IPv4 neighbours, host6 for IPv6).
 */
static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		/* Only ARP and ND neighbours are ever mirrored here. */
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}
1862
/* Best-effort allocation of an activity counter for the neighbour; on
 * any failure the entry simply stays without a counter
 * (counter_valid remains false).
 */
static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}
1875
/* Release the neighbour's flow counter, if one was allocated. */
static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}
1886
/* Create a neighbour entry for @n, insert it into the hash table and
 * link it on its RIF's neighbour list. Returns ERR_PTR(-EINVAL) when no
 * RIF exists for the neighbour's netdev, ERR_PTR(-ENOMEM) on allocation
 * failure, or the insertion error.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	/* Counter allocation is best-effort; the entry is fully
	 * functional without one.
	 */
	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
1915
/* Tear down a neighbour entry: unlink from the RIF list, free the
 * counter, remove from the hash table and free the memory. Exact
 * reverse order of mlxsw_sp_neigh_entry_create().
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
1925
1926static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001927mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001928{
Jiri Pirko33b13412016-11-10 12:31:04 +01001929 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001930
Jiri Pirko33b13412016-11-10 12:31:04 +01001931 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001932 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001933 &key, mlxsw_sp_neigh_ht_params);
1934}
1935
/* Derive the neighbour-activity polling interval from the kernel's
 * DELAY_PROBE_TIME. With IPv6 enabled, take the smaller of the ARP and
 * ND values so neither table's probing expectation is violated.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
1950
/* Process one IPv4 entry of a RAUHTD dump record: resolve the RIF to a
 * netdev, look up the matching kernel ARP neighbour and poke it via
 * neigh_event_send() so the kernel sees it as active.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	/* The register reports the address in host order; the kernel
	 * neighbour table is keyed in network order.
	 */
	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
1981
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of the IPv4 entry processing above: poke the kernel
 * ND neighbour that matches a RAUHTD dump record.
 */
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
/* Stub for IPv6-less builds; IPv6 records are silently ignored. */
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002019
/* Walk all IPv4 neighbour entries packed into one RAUHTD record. */
static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}

}
2042
/* IPv6 records are not packed: one record holds exactly one entry. */
static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}
2051
/* Dispatch one RAUHTD dump record to the IPv4 or IPv6 handler based on
 * its type field. Unknown record types are ignored.
 */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}
2066
/* Return true when a RAUHTD response is completely filled, meaning more
 * records may remain in the device and another query is needed.
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	/* Fewer records than the maximum means the dump is exhausted. */
	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	/* An IPv6 record always occupies a full record slot, so a
	 * max-length response ending in one is full.
	 */
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	/* An IPv4 record packs several entries; the response is only
	 * truly full when the last record's entry slots are all used
	 * (the count field is zero-based, hence the pre-increment).
	 */
	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
2086
/* Repeatedly query the RAUHTD register for activity dump records of the
 * given @type and feed each record to the per-record processing, until
 * the device stops returning full responses. Returns 0 or the register
 * query error.
 */
static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	return err;
}
2116
2117static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2118{
2119 enum mlxsw_reg_rauhtd_type type;
2120 char *rauhtd_pl;
2121 int err;
2122
2123 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2124 if (!rauhtd_pl)
2125 return -ENOMEM;
2126
2127 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2128 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2129 if (err)
2130 goto out;
2131
2132 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2133 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2134out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002135 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002136 return err;
2137}
2138
/* Keep every neighbour that backs a nexthop alive in the kernel,
 * regardless of observed traffic, by sending it a neigh event.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
2153
2154static void
2155mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2156{
Ido Schimmel9011b672017-05-16 19:38:25 +02002157 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002158
Ido Schimmel9011b672017-05-16 19:38:25 +02002159 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002160 msecs_to_jiffies(interval));
2161}
2162
/* Periodic work: dump neighbour activity from the device, refresh
 * nexthop-backing neighbours, then reschedule itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	/* Always reschedule, even after a failed dump. */
	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
2178
/* Periodic work: actively probe neighbours that back nexthops but are
 * not yet resolved.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
2204
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002205static void
2206mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2207 struct mlxsw_sp_neigh_entry *neigh_entry,
2208 bool removing);
2209
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002210static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002211{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002212 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2213 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2214}
2215
/* Write (add or delete) an IPv4 neighbour to the device's RAUHT host
 * table, binding the activity counter when one is allocated.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2232
/* IPv6 counterpart of the RAUHT write above. */
static void
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2249
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002250bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002251{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002252 struct neighbour *n = neigh_entry->key.n;
2253
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002254 /* Packets with a link-local destination address are trapped
2255 * after LPM lookup and never reach the neighbour table, so
2256 * there is no need to program such neighbours to the device.
2257 */
2258 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2259 IPV6_ADDR_LINKLOCAL)
2260 return true;
2261 return false;
2262}
2263
/* Synchronize a neighbour entry's connected state to the device,
 * dispatching on the neighbour's address family. Removing an entry that
 * was never programmed is a no-op.
 */
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		/* Link-local IPv6 neighbours are never programmed. */
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else {
		WARN_ON_ONCE(1);
	}
}
2284
/* Enable or disable counting on a neighbour entry: (de)allocate the
 * flow counter, then re-write the entry so the new counter binding
 * takes effect in the device.
 */
void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	/* Unconditional 'true': the entry is re-added so the RAUHT write
	 * carries (or drops) the counter binding.
	 */
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}
2296
/* Deferred-work context for netevent notifications, so handlers can run
 * in process context where RTNL may be taken.
 */
struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* used only by the neighbour-update work */
};
2302
/* Deferred handler for a neighbour-update netevent: snapshot the
 * neighbour's state under its lock, then create/update/destroy the
 * mirrored entry and refresh dependent nexthops under RTNL.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Nothing mirrored and nothing to mirror: done. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* A disconnected entry referenced by no nexthop is useless. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Drop the reference held for the duration of the deferred work;
	 * presumably taken when the work was queued — confirm against the
	 * netevent notifier.
	 */
	neigh_release(n);
	kfree(net_work);
}
2347
Ido Schimmel28678f02017-11-02 17:14:10 +01002348static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2349
2350static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2351{
2352 struct mlxsw_sp_netevent_work *net_work =
2353 container_of(work, struct mlxsw_sp_netevent_work, work);
2354 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2355
2356 mlxsw_sp_mp_hash_init(mlxsw_sp);
2357 kfree(net_work);
2358}
2359
/* Netevent notifier. Runs in atomic context, so every event that needs
 * real work allocates a mlxsw_sp_netevent_work with GFP_ATOMIC and defers
 * to process context; only the probe-time interval is updated inline.
 */
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;
	struct net *net;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_MULTIPATH_HASH_UPDATE:
		net = ptr;

		/* Only the init namespace's policy is honoured. */
		if (!net_eq(net, &init_net))
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work)
			return NOTIFY_BAD;

		router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
		INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
		net_work->mlxsw_sp = router->mlxsw_sp;
		mlxsw_core_schedule_work(&net_work->work);
		break;
	}

	return NOTIFY_DONE;
}
2441
/* Set up neighbour tracking: the neighbour hashtable, the periodic
 * activity-update work and the unresolved-nexthop probe work; both works
 * are kicked off immediately.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}
2465
/* Tear down neighbour tracking. The works are cancelled synchronously
 * before the hashtable they operate on is destroyed.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
2472
/* @rif is going away: unoffload and destroy every neighbour entry still
 * bound to it. _safe iteration since entries are removed from
 * rif->neigh_list while walking it.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}
2484
/* Kind of object a nexthop resolves to; selects which union member of
 * struct mlxsw_sp_nexthop is valid and which update routine programs it.
 */
enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,	/* regular Ethernet adjacency */
	MLXSW_SP_NEXTHOP_TYPE_IPIP,	/* IP-in-IP tunnel nexthop */
};
2489
/* rhashtable key for nexthop lookup: the kernel's IPv4 FIB nexthop. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
2493
/* One nexthop of an ECMP group as mirrored to the device. */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;	/* member of the RIF's nexthop list */
	struct list_head router_list_node; /* member of router->nexthop_list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;	/* node in router->nexthop_ht */
	struct mlxsw_sp_nexthop_key key;
	/* Gateway address; sized for IPv6, IPv4 uses the first 4 bytes. */
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;		/* weight as configured by the kernel route */
	int norm_nh_weight;	/* weight divided by the group's GCD */
	int num_adj_entries;	/* adjacency slots assigned by rebalance */
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	enum mlxsw_sp_nexthop_type type;
	union {	/* discriminated by @type */
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;	/* valid iff counter_valid */
	bool counter_valid;
};
2526
/* A set of nexthops sharing a block of adjacency entries in the device. */
struct mlxsw_sp_nexthop_group {
	void *priv;	/* protocol-specific key; struct fib_info * for IPv4
			 * (see mlxsw_sp_nexthop4_group_fi())
			 */
	struct rhash_head ht_node;	/* node in router->nexthop_group_ht */
	struct list_head fib_list; /* list of fib entries that use this group */
	struct neigh_table *neigh_tbl;	/* its family distinguishes IPv4/IPv6
					 * groups (mlxsw_sp_nexthop_group_type)
					 */
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;	/* base of the group's KVD linear block */
	u16 ecmp_size;	/* number of adjacency entries owned by the group */
	u16 count;	/* number of nexthops in @nexthops */
	int sum_norm_weight;	/* cached by mlxsw_sp_nexthop_group_normalize */
	struct mlxsw_sp_nexthop nexthops[0];	/* trailing flexible array */
#define nh_rif	nexthops[0].rif
};
2541
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002542void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2543 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002544{
2545 struct devlink *devlink;
2546
2547 devlink = priv_to_devlink(mlxsw_sp->core);
2548 if (!devlink_dpipe_table_counter_enabled(devlink,
2549 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2550 return;
2551
2552 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2553 return;
2554
2555 nh->counter_valid = true;
2556}
2557
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002558void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2559 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002560{
2561 if (!nh->counter_valid)
2562 return;
2563 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2564 nh->counter_valid = false;
2565}
2566
2567int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2568 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2569{
2570 if (!nh->counter_valid)
2571 return -EINVAL;
2572
2573 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2574 p_counter, NULL);
2575}
2576
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002577struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2578 struct mlxsw_sp_nexthop *nh)
2579{
2580 if (!nh) {
2581 if (list_empty(&router->nexthop_list))
2582 return NULL;
2583 else
2584 return list_first_entry(&router->nexthop_list,
2585 typeof(*nh), router_list_node);
2586 }
2587 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2588 return NULL;
2589 return list_next_entry(nh, router_list_node);
2590}
2591
2592bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2593{
2594 return nh->offloaded;
2595}
2596
2597unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2598{
2599 if (!nh->offloaded)
2600 return NULL;
2601 return nh->neigh_entry->ha;
2602}
2603
/* Report where @nh lives in the adjacency table: the group's base index
 * and size, plus @nh's offset inside the group. Returns -EINVAL when the
 * nexthop or its group is not currently programmed.
 */
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
			     u32 *p_adj_size, u32 *p_adj_hash_index)
{
	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
	u32 adj_hash_index = 0;
	int i;

	if (!nh->offloaded || !nh_grp->adj_index_valid)
		return -EINVAL;

	*p_adj_index = nh_grp->adj_index;
	*p_adj_size = nh_grp->ecmp_size;

	/* The offset is the sum of adjacency entries used by the
	 * offloaded nexthops that precede @nh in the group.
	 */
	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];

		if (nh_iter == nh)
			break;
		if (nh_iter->offloaded)
			adj_hash_index += nh_iter->num_adj_entries;
	}

	*p_adj_hash_index = adj_hash_index;
	return 0;
}
2629
/* Router interface the nexthop egresses through. */
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
	return nh->rif;
}
2634
2635bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2636{
2637 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2638 int i;
2639
2640 for (i = 0; i < nh_grp->count; i++) {
2641 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2642
2643 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2644 return true;
2645 }
2646 return false;
2647}
2648
/* For an IPv4 group, priv holds the kernel fib_info that keyed it. */
static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->priv;
}
2654
/* Lookup key for the nexthop-group hashtable; @proto selects which union
 * member is meaningful (fi for IPv4, fib6_entry for IPv6).
 */
struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_l3proto proto;
	union {
		struct fib_info *fi;
		struct mlxsw_sp_fib6_entry *fib6_entry;
	};
};
2662
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002663static bool
2664mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002665 const struct in6_addr *gw, int ifindex,
2666 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002667{
2668 int i;
2669
2670 for (i = 0; i < nh_grp->count; i++) {
2671 const struct mlxsw_sp_nexthop *nh;
2672
2673 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002674 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002675 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2676 return true;
2677 }
2678
2679 return false;
2680}
2681
/* An IPv6 group matches a fib6 entry iff the nexthop counts agree and
 * every rt6 of the entry has a corresponding nexthop with the same
 * gateway, egress ifindex and weight.
 */
static bool
mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
			    const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	if (nh_grp->count != fib6_entry->nrt6)
		return false;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct in6_addr *gw;
		int ifindex, weight;

		ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
		weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
		gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
							 weight))
			return false;
	}

	return true;
}
2705
/* rhashtable obj_cmpfn: per its contract, returns zero on match and
 * non-zero otherwise, dispatching on the key's protocol.
 */
static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		/* IPv4 groups are keyed by fib_info identity. */
		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
	case MLXSW_SP_L3_PROTO_IPV6:
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	default:
		WARN_ON(1);
		return 1;	/* never match */
	}
}
2723
/* Address family (AF_INET/AF_INET6) of the group, derived from the
 * neighbour table it was created against.
 */
static int
mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->neigh_tbl->family;
}
2729
/* rhashtable obj_hashfn for stored groups. Must hash to the same value
 * as mlxsw_sp_nexthop_group_hash() does for a matching lookup key:
 * IPv4 hashes the fib_info pointer, IPv6 hashes count XOR ifindexes.
 */
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group *nh_grp = data;
	const struct mlxsw_sp_nexthop *nh;
	struct fib_info *fi;
	unsigned int val;
	int i;

	switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
	case AF_INET:
		fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
		return jhash(&fi, sizeof(fi), seed);
	case AF_INET6:
		val = nh_grp->count;
		for (i = 0; i < nh_grp->count; i++) {
			nh = &nh_grp->nexthops[i];
			val ^= nh->ifindex;
		}
		return jhash(&val, sizeof(val), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}
2754
2755static u32
2756mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2757{
2758 unsigned int val = fib6_entry->nrt6;
2759 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2760 struct net_device *dev;
2761
2762 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2763 dev = mlxsw_sp_rt6->rt->dst.dev;
2764 val ^= dev->ifindex;
2765 }
2766
2767 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002768}
2769
/* rhashtable hashfn for lookup keys; must agree with
 * mlxsw_sp_nexthop_group_hash_obj() for the corresponding protocol.
 */
static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_L3_PROTO_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	default:
		WARN_ON(1);
		return 0;
	}
}
2785
/* Group table uses protocol-specific keys (struct
 * mlxsw_sp_nexthop_group_cmp_arg), hence custom hash/compare callbacks
 * instead of key_offset/key_len.
 */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
};
2792
2793static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2794 struct mlxsw_sp_nexthop_group *nh_grp)
2795{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002796 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2797 !nh_grp->gateway)
2798 return 0;
2799
Ido Schimmel9011b672017-05-16 19:38:25 +02002800 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002801 &nh_grp->ht_node,
2802 mlxsw_sp_nexthop_group_ht_params);
2803}
2804
2805static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2806 struct mlxsw_sp_nexthop_group *nh_grp)
2807{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002808 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2809 !nh_grp->gateway)
2810 return;
2811
Ido Schimmel9011b672017-05-16 19:38:25 +02002812 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002813 &nh_grp->ht_node,
2814 mlxsw_sp_nexthop_group_ht_params);
2815}
2816
2817static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002818mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2819 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002820{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002821 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2822
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002823 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002824 cmp_arg.fi = fi;
2825 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2826 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002827 mlxsw_sp_nexthop_group_ht_params);
2828}
2829
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002830static struct mlxsw_sp_nexthop_group *
2831mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2832 struct mlxsw_sp_fib6_entry *fib6_entry)
2833{
2834 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2835
2836 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2837 cmp_arg.fib6_entry = fib6_entry;
2838 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2839 &cmp_arg,
2840 mlxsw_sp_nexthop_group_ht_params);
2841}
2842
/* Individual nexthops are keyed plainly by struct mlxsw_sp_nexthop_key. */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
2848
/* Index @nh by its key for later mlxsw_sp_nexthop_lookup(). */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
2855
/* Drop @nh from the nexthop index. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
2862
/* Find the nexthop created for @key (the kernel fib_nh), or NULL. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
2870
/* Issue a RALEU write: re-point all routes of @fib's virtual router that
 * reference the old adjacency block at the new block in one operation.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
2885
/* After the group's adjacency block moved/resized, update every virtual
 * router containing a route that uses this group. A fib identical to the
 * previously handled one is skipped, so runs of entries sharing a fib
 * trigger only one RALEU write.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
2908
/* Program one Ethernet adjacency entry (RATR register) with the
 * neighbour's MAC and egress RIF, binding the nexthop's flow counter
 * when one is allocated.
 */
static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				     struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
			    adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	if (nh->counter_valid)
		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
	else
		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
2926
Ido Schimmeleb789982017-10-22 23:11:48 +02002927int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2928 struct mlxsw_sp_nexthop *nh)
2929{
2930 int i;
2931
2932 for (i = 0; i < nh->num_adj_entries; i++) {
2933 int err;
2934
2935 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2936 if (err)
2937 return err;
2938 }
2939
2940 return 0;
2941}
2942
2943static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2944 u32 adj_index,
2945 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002946{
2947 const struct mlxsw_sp_ipip_ops *ipip_ops;
2948
2949 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2950 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2951}
2952
Ido Schimmeleb789982017-10-22 23:11:48 +02002953static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2954 u32 adj_index,
2955 struct mlxsw_sp_nexthop *nh)
2956{
2957 int i;
2958
2959 for (i = 0; i < nh->num_adj_entries; i++) {
2960 int err;
2961
2962 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2963 nh);
2964 if (err)
2965 return err;
2966 }
2967
2968 return 0;
2969}
2970
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002971static int
Petr Machata35225e42017-09-02 23:49:22 +02002972mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2973 struct mlxsw_sp_nexthop_group *nh_grp,
2974 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002975{
2976 u32 adj_index = nh_grp->adj_index; /* base */
2977 struct mlxsw_sp_nexthop *nh;
2978 int i;
2979 int err;
2980
2981 for (i = 0; i < nh_grp->count; i++) {
2982 nh = &nh_grp->nexthops[i];
2983
2984 if (!nh->should_offload) {
2985 nh->offloaded = 0;
2986 continue;
2987 }
2988
Ido Schimmela59b7e02017-01-23 11:11:42 +01002989 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002990 switch (nh->type) {
2991 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002992 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002993 (mlxsw_sp, adj_index, nh);
2994 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002995 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2996 err = mlxsw_sp_nexthop_ipip_update
2997 (mlxsw_sp, adj_index, nh);
2998 break;
Petr Machata35225e42017-09-02 23:49:22 +02002999 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003000 if (err)
3001 return err;
3002 nh->update = 0;
3003 nh->offloaded = 1;
3004 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003005 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003006 }
3007 return 0;
3008}
3009
Ido Schimmel1819ae32017-07-21 18:04:28 +02003010static bool
3011mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3012 const struct mlxsw_sp_fib_entry *fib_entry);
3013
/* Rewrite the FIB entry of every route using @nh_grp so it picks up the
 * group's current adjacency index/size. Only the first entry of each fib
 * node is programmed in hardware, so the others are skipped.
 */
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
3031
3032static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003033mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3034 enum mlxsw_reg_ralue_op op, int err);
3035
3036static void
3037mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3038{
3039 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3040 struct mlxsw_sp_fib_entry *fib_entry;
3041
3042 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3043 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3044 fib_entry))
3045 continue;
3046 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3047 }
3048}
3049
Ido Schimmel425a08c2017-10-22 23:11:47 +02003050static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3051{
3052 /* Valid sizes for an adjacency group are:
3053 * 1-64, 512, 1024, 2048 and 4096.
3054 */
3055 if (*p_adj_grp_size <= 64)
3056 return;
3057 else if (*p_adj_grp_size <= 512)
3058 *p_adj_grp_size = 512;
3059 else if (*p_adj_grp_size <= 1024)
3060 *p_adj_grp_size = 1024;
3061 else if (*p_adj_grp_size <= 2048)
3062 *p_adj_grp_size = 2048;
3063 else
3064 *p_adj_grp_size = 4096;
3065}
3066
3067static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3068 unsigned int alloc_size)
3069{
3070 if (alloc_size >= 4096)
3071 *p_adj_grp_size = 4096;
3072 else if (alloc_size >= 2048)
3073 *p_adj_grp_size = 2048;
3074 else if (alloc_size >= 1024)
3075 *p_adj_grp_size = 1024;
3076 else if (alloc_size >= 512)
3077 *p_adj_grp_size = 512;
3078}
3079
/* Turn the requested adjacency group size into one the device and the
 * KVD linear allocator can both satisfy. On success *@p_adj_grp_size
 * holds the final (possibly larger) size.
 */
static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
				     u16 *p_adj_grp_size)
{
	unsigned int alloc_size;
	int err;

	/* Round up the requested group size to the next size supported
	 * by the device and make sure the request can be satisfied.
	 */
	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
	err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
					     &alloc_size);
	if (err)
		return err;
	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as much of them as
	 * possible.
	 */
	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);

	return 0;
}
3102
/* Divide the offloadable nexthops' weights by their greatest common
 * divisor so the smallest adjacency group can express the configured
 * ratios, and cache the resulting weight sum. Note: nexthops skipped in
 * the first loop are also skipped in the second, so the division only
 * happens when g was set to a non-zero weight.
 */
static void
mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
{
	int i, g = 0, sum_norm_weight = 0;
	struct mlxsw_sp_nexthop *nh;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload)
			continue;
		if (g > 0)
			g = gcd(nh->nh_weight, g);
		else
			g = nh->nh_weight;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload)
			continue;
		nh->norm_nh_weight = nh->nh_weight / g;
		sum_norm_weight += nh->norm_nh_weight;
	}

	nh_grp->sum_norm_weight = sum_norm_weight;
}
3131
3132static void
3133mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3134{
3135 int total = nh_grp->sum_norm_weight;
3136 u16 ecmp_size = nh_grp->ecmp_size;
3137 int i, weight = 0, lower_bound = 0;
3138
3139 for (i = 0; i < nh_grp->count; i++) {
3140 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3141 int upper_bound;
3142
3143 if (!nh->should_offload)
3144 continue;
3145 weight += nh->norm_nh_weight;
3146 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3147 nh->num_adj_entries = upper_bound - lower_bound;
3148 lower_bound = upper_bound;
3149 }
3150}
3151
/* Re-program a nexthop group after a change to one of its nexthops:
 * allocate a new adjacency group sized for the currently offloadable
 * nexthops, write it to the device, re-point the FIB entries at it and
 * release the old allocation. Any failure falls back to trapping the
 * group's traffic to the CPU (set_trap label).
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	u16 ecmp_size, old_ecmp_size;
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	bool old_adj_index_valid;
	u32 old_adj_index;
	int i;
	int err;

	/* Gateway-less groups have no adjacency entries; only the FIB
	 * entries that use the group need updating.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	/* Detect whether the set of offloadable nexthops changed and flag
	 * nexthops that became offloadable for (re-)programming.
	 */
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	mlxsw_sp_nexthop_group_normalize(nh_grp);
	if (!nh_grp->sum_norm_weight)
		/* No neigh of this group is connected so we just set
		 * the trap and let everthing flow through kernel.
		 */
		goto set_trap;

	ecmp_size = nh_grp->sum_norm_weight;
	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
	if (err)
		/* No valid allocation size available. */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	/* Switch the group to the new allocation before programming it, so
	 * the update and rebalance below operate on the new index/size.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	mlxsw_sp_nexthop_group_rebalance(nh_grp);
	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	/* Re-point all routes using the old adjacency range at the new one,
	 * then free the old range regardless of the outcome.
	 */
	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	/* Offload state within the group changed, so update the flags. */
	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);

	return;

set_trap:
	/* Fallback: invalidate the adjacency group, mark all nexthops as
	 * not offloaded and trap the group's traffic to the CPU.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
3262
3263static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3264 bool removing)
3265{
Petr Machata213666a2017-07-31 09:27:30 +02003266 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003267 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003268 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003269 nh->should_offload = 0;
3270 nh->update = 1;
3271}
3272
/* Propagate a neighbour entry state change to every nexthop that uses
 * it and refresh the affected nexthop groups.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3286
/* Associate a nexthop with a router interface (RIF) and link it into the
 * RIF's nexthop list. A no-op if the nexthop already has a RIF.
 */
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}
3296
/* Detach a nexthop from its router interface, if it has one. Inverse of
 * mlxsw_sp_nexthop_rif_init().
 */
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}
3305
/* Resolve and bind the neighbour entry for a gateway nexthop: look up
 * (or create) the kernel neighbour for the nexthop's gateway address on
 * its RIF device, attach the corresponding driver neigh entry and seed
 * the nexthop's offload state from the neighbour's NUD state. Returns 0
 * for non-gateway or already-initialized nexthops.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
				 nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	/* Snapshot NUD state and liveness under the neighbour lock; the
	 * nexthop is offloadable only while the neighbour is valid.
	 */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
3360
/* Unbind a nexthop from its neighbour entry, destroying the entry if
 * this was its last disconnected user, and drop the neighbour reference
 * taken in mlxsw_sp_nexthop_neigh_init(). No-op without a neigh entry.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	/* Release the reference held since neigh_lookup()/neigh_create(). */
	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003386
Petr Machata44b0fff2017-11-03 10:03:44 +01003387static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3388{
3389 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3390
3391 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3392}
3393
/* Bind a gateway nexthop to an IP-in-IP tunnel entry: attach the tunnel,
 * derive the initial offload state from the underlay device's admin
 * state and point the nexthop at the tunnel's loopback RIF. A no-op for
 * non-gateway or already-bound nexthops.
 */
static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	bool removing;

	if (!nh->nh_grp->gateway || nh->ipip_entry)
		return;

	nh->ipip_entry = ipip_entry;
	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
	__mlxsw_sp_nexthop_neigh_update(nh, removing);
	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}
3408
/* Unbind a nexthop from its IP-in-IP tunnel entry and mark it as not
 * offloadable. Inverse of mlxsw_sp_nexthop_ipip_init().
 */
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;

	if (!ipip_entry)
		return;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	nh->ipip_entry = NULL;
}
3420
3421static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3422 const struct fib_nh *fib_nh,
3423 enum mlxsw_sp_ipip_type *p_ipipt)
3424{
3425 struct net_device *dev = fib_nh->nh_dev;
3426
3427 return dev &&
3428 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3429 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3430}
3431
/* Tear down the type-specific state of a nexthop. Note the fini order
 * differs per type: ETH releases the neighbour before the RIF, while
 * IPIP releases the (tunnel loopback) RIF before the tunnel binding.
 */
static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	switch (nh->type) {
	case MLXSW_SP_NEXTHOP_TYPE_ETH:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
		break;
	}
}
3446
/* Initialize the type-specific state of an IPv4 nexthop. If the nexthop
 * device is an offloadable IP-in-IP tunnel, bind to the tunnel entry;
 * otherwise treat it as an Ethernet nexthop and resolve its RIF and
 * neighbour. Returns 0 when there is nothing to bind yet (no RIF).
 */
static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct fib_nh *fib_nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct net_device *dev = fib_nh->nh_dev;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV4)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	/* Not an offloadable tunnel: fall back to a regular Ethernet
	 * nexthop keyed on the device's RIF.
	 */
	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	mlxsw_sp_nexthop_rif_init(nh, rif);
	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_neigh_init;

	return 0;

err_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}
3484
/* IPv4 wrapper around the protocol-agnostic type-specific teardown. */
static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}
3490
/* Initialize one nexthop of an IPv4 nexthop group from its kernel
 * fib_nh: record the weight and gateway, insert it into the driver's
 * nexthop hash table and global list, allocate its counter and, when a
 * usable device is present, set up the type-specific state. Returns 0 on
 * success or a negative error.
 */
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	nh->nh_weight = fib_nh->nh_weight;
#else
	/* Without multipath support every nexthop weighs the same. */
	nh->nh_weight = 1;
#endif
	memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;

	/* Respect ignore_routes_with_linkdown: leave a link-down nexthop
	 * uninitialized so it is not offloaded.
	 */
	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	/* NOTE(review): this path only removes the hash-table entry; the
	 * counter allocation and router_list_node linkage above are not
	 * undone here - presumably the caller's unwind via
	 * mlxsw_sp_nexthop4_fini() covers partially-initialized nexthops;
	 * verify against mlxsw_sp_nexthop4_group_create().
	 */
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}
3533
/* Tear down one IPv4 nexthop in the reverse order of
 * mlxsw_sp_nexthop4_init(): type-specific state, global list linkage,
 * counter and finally the hash-table entry.
 */
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
3542
/* Handle a kernel FIB nexthop add/delete notification for IPv4: locate
 * the driver nexthop for the fib_nh, (re)initialize or tear down its
 * type-specific state and refresh the owning group. Ignored entirely
 * once the router has aborted FIB offload.
 */
static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
				    unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	/* A notification for a nexthop we never inserted is a bug. */
	if (WARN_ON_ONCE(!nh))
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
3568
/* Re-evaluate the offload state of every nexthop using a RIF whose
 * state changed. ETH nexthops become offloadable; IPIP nexthops depend
 * on the underlay device of the RIF's device being up.
 */
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh;
	bool removing;

	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
		switch (nh->type) {
		case MLXSW_SP_NEXTHOP_TYPE_ETH:
			removing = false;
			break;
		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
			break;
		default:
			/* Unknown nexthop type on the RIF list is a bug. */
			WARN_ON(1);
			continue;
		}

		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3592
/* Move every nexthop from @old_rif to @new_rif, re-point them at the new
 * RIF and refresh their offload state against it.
 */
static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif)
{
	struct mlxsw_sp_nexthop *nh;

	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
		nh->rif = new_rif;
	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
}
3604
/* A RIF is going away: tear down the type-specific state of every
 * nexthop still using it and refresh the affected groups. The _safe
 * iterator is needed because the fini call unlinks nh from rif_list.
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3615
Petr Machata9b014512017-09-02 23:49:20 +02003616static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3617 const struct fib_info *fi)
3618{
Petr Machata1012b9a2017-09-02 23:49:23 +02003619 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3620 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003621}
3622
/* Create a driver nexthop group for an IPv4 fib_info: allocate the group
 * together with its embedded nexthop array, initialize every nexthop
 * from the fib_info, insert the group into the group hash table and
 * program it. Holds a reference on @fi for the group's lifetime.
 * Returns the group or an ERR_PTR.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* One allocation for the group header plus its nexthop array. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	nh_grp->priv = fi;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->neigh_tbl = &arp_tbl;

	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
	nh_grp->count = fi->fib_nhs;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}
3668
/* Destroy an IPv4 nexthop group: remove it from the hash table, tear
 * down its nexthops, release the adjacency group via a final refresh
 * (which must leave no valid adjacency index) and drop the fib_info
 * reference taken at creation.
 */
static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
	kfree(nh_grp);
}
3686
/* Bind a FIB entry to the nexthop group of its fib_info, creating the
 * group on first use. Groups are shared by all routes with the same
 * fib_info and are refcounted via the group's fib_list.
 */
static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}
3703
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003704static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3705 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003706{
3707 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3708
3709 list_del(&fib_entry->nexthop_group_node);
3710 if (!list_empty(&nh_grp->fib_list))
3711 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003712 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003713}
3714
Ido Schimmel013b20f2017-02-08 11:16:36 +01003715static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003716mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3717{
3718 struct mlxsw_sp_fib4_entry *fib4_entry;
3719
3720 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3721 common);
3722 return !fib4_entry->tos;
3723}
3724
/* Decide whether a FIB entry is currently offloadable: IPv4 entries must
 * pass the per-protocol check first, and then the entry type determines
 * the required state (valid adjacency group for remote routes, a RIF for
 * local routes, always for tunnel decap entries).
 */
static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return true;
	default:
		return false;
	}
}
3750
Ido Schimmel428b8512017-08-03 13:28:28 +02003751static struct mlxsw_sp_nexthop *
3752mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3753 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3754{
3755 int i;
3756
3757 for (i = 0; i < nh_grp->count; i++) {
3758 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3759 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3760
3761 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3762 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3763 &rt->rt6i_gateway))
3764 return nh;
3765 continue;
3766 }
3767
3768 return NULL;
3769}
3770
/* Reflect an IPv4 entry's offload state into the kernel's RTNH_F_OFFLOAD
 * flags. Local and decap entries mark only the first nexthop; remote
 * entries mark each nexthop according to its own offload state.
 */
static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
		nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
3792
3793static void
3794mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3795{
3796 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3797 int i;
3798
3799 for (i = 0; i < nh_grp->count; i++) {
3800 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3801
3802 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3803 }
3804}
3805
/* Reflect an IPv6 entry's offload state into the kernel's RTNH_F_OFFLOAD
 * flags. Local entries mark only the first rt6; otherwise each rt6 is
 * marked according to the offload state of its matching nexthop.
 */
static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
		else
			mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
3832
3833static void
3834mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3835{
3836 struct mlxsw_sp_fib6_entry *fib6_entry;
3837 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3838
3839 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3840 common);
3841 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3842 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3843
Ido Schimmelfe400792017-08-15 09:09:49 +02003844 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003845 }
3846}
3847
/* Protocol dispatch for setting the kernel offload indication of a FIB
 * entry.
 */
static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_set(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_set(fib_entry);
		break;
	}
}
3859
/* Clear the offload indication from the kernel route(s) backing this
 * FIB entry, dispatching on the entry's L3 protocol.
 */
static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
		break;
	}
}
3872
/* Synchronize the kernel's offload indication with the outcome of a
 * RALUE operation. On delete, always clear the indication. On a
 * successful write, set or clear it depending on whether the entry can
 * actually be offloaded; a failed write leaves the indication untouched.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
3892
/* Pack the common part of a RALUE register payload for @fib_entry:
 * protocol, operation, virtual router and destination prefix. The
 * action-specific part is packed separately by the callers.
 */
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		/* IPv4 destination is passed by value as a single word. */
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 destination is passed by pointer to the 128-bit key. */
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}
3918
/* Program a remote (gateway) FIB entry into the device. Points the
 * entry at its nexthop group's adjacency entries when offload is
 * possible, otherwise traps matching packets to the kernel.
 */
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
3947
/* Program a local (directly connected) FIB entry into the device.
 * Forwards via the nexthop RIF when offload is possible, otherwise
 * traps matching packets to the kernel.
 */
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
3971
/* Program a trap FIB entry: matching packets are sent to the CPU
 * ("IP2ME" action), e.g. for host and broadcast routes.
 */
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
3982
/* Program an IPIP decapsulation FIB entry by delegating to the
 * tunnel-type specific fib_entry_op() callback with the tunnel index
 * reserved for this entry's decap.
 */
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	/* Entries of this type must have been bound to an IPIP entry
	 * by mlxsw_sp_fib_entry_decap_init().
	 */
	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
				      fib_entry->decap.tunnel_index);
}
3998
/* Dispatch a RALUE operation to the handler matching the entry's type.
 * Returns -EINVAL for unknown entry types.
 */
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
							fib_entry, op);
	}
	return -EINVAL;
}
4016
4017static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4018 struct mlxsw_sp_fib_entry *fib_entry,
4019 enum mlxsw_reg_ralue_op op)
4020{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004021 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004022
Ido Schimmel013b20f2017-02-08 11:16:36 +01004023 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004024
Ido Schimmel013b20f2017-02-08 11:16:36 +01004025 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004026}
4027
/* Write (create or overwrite) @fib_entry in the device. */
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
4034
/* Delete @fib_entry from the device. */
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
4041
/* Derive the device entry type from the kernel route type in @fen_info.
 * Local routes whose address matches an UP IPIP tunnel underlay become
 * decap entries (this also reserves decap resources via
 * mlxsw_sp_fib_entry_decap_init()). Returns -EINVAL for route types
 * that cannot be represented.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct net_device *dev = fen_info->fi->fib_dev;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
						 MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		/* fall through */
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}
4085
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004086static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004087mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4088 struct mlxsw_sp_fib_node *fib_node,
4089 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004090{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004091 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004092 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004093 int err;
4094
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004095 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4096 if (!fib4_entry)
4097 return ERR_PTR(-ENOMEM);
4098 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004099
4100 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4101 if (err)
4102 goto err_fib4_entry_type_set;
4103
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004104 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004105 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004106 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004107
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004108 fib4_entry->prio = fen_info->fi->fib_priority;
4109 fib4_entry->tb_id = fen_info->tb_id;
4110 fib4_entry->type = fen_info->type;
4111 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004112
4113 fib_entry->fib_node = fib_node;
4114
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004115 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004116
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004117err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004119 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004120 return ERR_PTR(err);
4121}
4122
/* Release an IPv4 FIB entry: drop the nexthop group reference first,
 * then free the entry itself.
 */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}
4129
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004130static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004131mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4132 const struct fib_entry_notifier_info *fen_info)
4133{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004134 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004135 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004136 struct mlxsw_sp_fib *fib;
4137 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004138
Ido Schimmel160e22a2017-07-18 10:10:20 +02004139 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4140 if (!vr)
4141 return NULL;
4142 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4143
4144 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4145 sizeof(fen_info->dst),
4146 fen_info->dst_len);
4147 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004148 return NULL;
4149
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004150 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4151 if (fib4_entry->tb_id == fen_info->tb_id &&
4152 fib4_entry->tos == fen_info->tos &&
4153 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004154 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4155 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004156 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004157 }
4158 }
4159
4160 return NULL;
4161}
4162
/* Hash table parameters for FIB node lookup; nodes are keyed by
 * {address, prefix length} (struct mlxsw_sp_fib_key).
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
4169
/* Insert @fib_node into the FIB's hash table, keyed by its prefix. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
4176
/* Remove @fib_node from the FIB's hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
4183
/* Look up a FIB node by {address, prefix length}. The key is zeroed
 * first so that address bytes beyond @addr_len compare equal for
 * shorter (e.g. IPv4) addresses stored in the fixed-size key.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
4195
4196static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004197mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004198 size_t addr_len, unsigned char prefix_len)
4199{
4200 struct mlxsw_sp_fib_node *fib_node;
4201
4202 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4203 if (!fib_node)
4204 return NULL;
4205
4206 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004207 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004208 memcpy(fib_node->key.addr, addr, addr_len);
4209 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004210
4211 return fib_node;
4212}
4213
/* Unlink and free a FIB node. The node must not hold any entries. */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
4220
4221static bool
4222mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4223 const struct mlxsw_sp_fib_entry *fib_entry)
4224{
4225 return list_first_entry(&fib_node->entry_list,
4226 struct mlxsw_sp_fib_entry, list) == fib_entry;
4227}
4228
/* Account @fib_node's prefix length in the LPM tree used for its
 * protocol. If the prefix length is already in use, only bump its
 * reference count. Otherwise get a tree covering the extended prefix
 * usage and atomically rebind all virtual routers to it.
 */
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	/* On the replace path, 'lpm_tree' now points at the new tree. */
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}
4260
/* Drop @fib_node's reference on its prefix length in the current LPM
 * tree. When the last reference goes away, try to shrink to a tree
 * without that prefix length; failures are non-fatal and simply keep
 * the current (larger) tree in use.
 */
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
4291
/* Bind @fib_node to @fib: insert it into the FIB's hash table and
 * account its prefix length in the LPM tree. Unwinds the insertion on
 * LPM tree failure.
 */
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}
4314
/* Undo mlxsw_sp_fib_node_init(): release the node's LPM tree prefix
 * accounting and remove it from the FIB's hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
4324
/* Get the FIB node for the given {table, prefix}, creating it (and
 * taking a virtual router reference) if it does not exist yet.
 * Returns the node or an ERR_PTR(). Note: the VR reference taken by
 * mlxsw_sp_vr_get() is kept while the node exists and is dropped by
 * mlxsw_sp_fib_node_put().
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
4362
/* Release a FIB node obtained via mlxsw_sp_fib_node_get(). The node is
 * only torn down (and the VR reference dropped) once it no longer holds
 * any entries.
 */
static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
4374
/* Find the first existing entry that @new4_entry should be placed
 * before (or replace). The node's list is kept sorted by decreasing
 * table ID, then decreasing TOS, then decreasing priority; a match is
 * the first entry of the same table with lower TOS, or with the same
 * TOS and a priority not higher than the new entry's.
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id > new4_entry->tb_id)
			continue;
		if (fib4_entry->tb_id != new4_entry->tb_id)
			break;
		if (fib4_entry->tos > new4_entry->tos)
			continue;
		if (fib4_entry->prio >= new4_entry->prio ||
		    fib4_entry->tos < new4_entry->tos)
			return fib4_entry;
	}

	return NULL;
}
4395
/* Append @new4_entry after the last existing entry sharing the same
 * {table, TOS, priority} as @fib4_entry (the insertion point found by
 * mlxsw_sp_fib4_node_entry_find()). Used for NLM_F_APPEND requests.
 */
static int
mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
			       struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	/* Append was requested but no entry with matching key exists. */
	if (WARN_ON(!fib4_entry))
		return -EINVAL;

	fib_node = fib4_entry->common.fib_node;
	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
				 common.list) {
		if (fib4_entry->tb_id != new4_entry->tb_id ||
		    fib4_entry->tos != new4_entry->tos ||
		    fib4_entry->prio != new4_entry->prio)
			break;
	}

	/* 'fib4_entry' now points one past the run of equal entries;
	 * list_add_tail() inserts the new entry right before it.
	 */
	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
	return 0;
}
4417
/* Insert @new4_entry into its node's sorted entry list, honoring the
 * append/replace semantics of the triggering netlink request. For
 * replace, the new entry is inserted immediately before the entry it
 * replaces so the old one can be removed afterwards.
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
	if (replace && WARN_ON(!fib4_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib4_entry) {
		list_add_tail(&new4_entry->common.list,
			      &fib4_entry->common.list);
	} else {
		struct mlxsw_sp_fib4_entry *last;

		/* No insertion point found: place the entry after the
		 * last entry with a higher table ID to keep the list
		 * sorted by decreasing table ID.
		 */
		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			if (new4_entry->tb_id > last->tb_id)
				break;
			fib4_entry = last;
		}

		if (fib4_entry)
			list_add(&new4_entry->common.list,
				 &fib4_entry->common.list);
		else
			list_add(&new4_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}
4457
/* Remove @fib4_entry from its node's entry list. */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	list_del(&fib4_entry->common.list);
}
4463
/* Program @fib_entry to the device if it became the first (and thus
 * active) entry of its node. The previously active entry, if any, is
 * overwritten in place rather than deleted first, so only its offload
 * indication needs refreshing.
 */
static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
4484
/* Remove @fib_entry from the device if it was the active (first) entry
 * of its node. If another entry remains, promote it by overwriting the
 * device entry instead of deleting, avoiding packet loss.
 */
static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	/* Last entry of the node: actually delete it from the device. */
	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
4505
/* Link an IPv4 entry into its node's list and program it to the device
 * if it became the active entry. Unlinks on programming failure.
 */
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib4_entry *fib4_entry,
					 bool replace, bool append)
{
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
	return err;
}
4526
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and from the node's list.
 * IPIP decapsulation entries additionally release their tunnel state.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_node_list_remove(fib4_entry);

	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
}
4537
/* Finish a FIB_EVENT route replace: destroy the entry that the newly
 * linked @fib4_entry superseded. No-op unless @replace is set. Relies on
 * the insert code having placed the new entry immediately before the
 * replaced one in the node's list.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib4_entry, common.list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	/* Drop the node reference held by the replaced entry. */
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
4555
/* Handle an IPv4 route add/replace/append notification: look up or create
 * the FIB node for the prefix, create the entry, link it into the node
 * (programming the device if it is the best entry) and, for replace,
 * dispose of the superseded entry. Silently succeeds when the router is
 * in aborted mode (traffic is trapped to the CPU). Returns 0 or errno.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
4601
/* Handle an IPv4 route delete notification: find the matching entry,
 * unlink it from device and node, destroy it and drop the node reference
 * (freeing the node when it is the last entry). No-op in aborted mode.
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	/* A delete for a route we never offloaded indicates a bug. */
	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib4_entry))
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004620
Ido Schimmel428b8512017-08-03 13:28:28 +02004621static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4622{
4623 /* Packets with link-local destination IP arriving to the router
4624 * are trapped to the CPU, so no need to program specific routes
4625 * for them.
4626 */
4627 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4628 return true;
4629
4630 /* Multicast routes aren't supported, so ignore them. Neighbour
4631 * Discovery packets are specifically trapped.
4632 */
4633 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4634 return true;
4635
4636 /* Cloned routes are irrelevant in the forwarding path. */
4637 if (rt->rt6i_flags & RTF_CACHE)
4638 return true;
4639
4640 return false;
4641}
4642
/* Allocate a wrapper around an IPv6 route and take a reference on it.
 * Returns the wrapper or ERR_PTR(-ENOMEM).
 */
static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	rt6_hold(rt);

	return mlxsw_sp_rt6;
}
4660
/* Drop the reference taken in mlxsw_sp_rt6_create(). rt6_release() only
 * exists when IPv6 is built in, hence the empty stub for the !CONFIG_IPV6
 * case (in which the IPv6 offload paths never run).
 */
#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
	rt6_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
}
#endif
4671
4672static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4673{
4674 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4675 kfree(mlxsw_sp_rt6);
4676}
4677
4678static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4679{
4680 /* RTF_CACHE routes are ignored */
4681 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4682}
4683
4684static struct rt6_info *
4685mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4686{
4687 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4688 list)->rt;
4689}
4690
/* Find an existing multipath-capable entry that @nrt should be appended
 * to: same table, same metric, and itself multipath-capable. The node's
 * entry list is kept ordered by (table id descending, metric ascending),
 * which the scan below relies on. Returns NULL when @nrt cannot be a
 * multipath member, when doing a replace, or when no match exists.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
		 */
		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (rt->rt6i_metric < nrt->rt6i_metric)
			continue;
		if (rt->rt6i_metric == nrt->rt6i_metric &&
		    mlxsw_sp_fib6_rt_can_mp(rt))
			return fib6_entry;
		if (rt->rt6i_metric > nrt->rt6i_metric)
			break;
	}

	return NULL;
}
4721
4722static struct mlxsw_sp_rt6 *
4723mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4724 const struct rt6_info *rt)
4725{
4726 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4727
4728 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4729 if (mlxsw_sp_rt6->rt == rt)
4730 return mlxsw_sp_rt6;
4731 }
4732
4733 return NULL;
4734}
4735
Petr Machata8f28a302017-09-02 23:49:24 +02004736static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4737 const struct rt6_info *rt,
4738 enum mlxsw_sp_ipip_type *ret)
4739{
4740 return rt->dst.dev &&
4741 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4742}
4743
/* Resolve the type of an IPv6 nexthop. If the egress device is an
 * offloadable IP-in-IP tunnel the nexthop becomes an IPIP nexthop;
 * otherwise it is an Ethernet nexthop bound to the device's RIF and
 * its neighbour entry. A missing RIF is not an error - the nexthop
 * simply stays unresolved. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct rt6_info *rt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->dst.dev;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV6)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}
4782
/* Release type-specific state (RIF/neighbour or IPIP) of an IPv6
 * nexthop; thin IPv6-named wrapper over the common helper.
 */
static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}
4788
/* Initialize one nexthop of an IPv6 nexthop group from kernel route @rt:
 * weight, gateway address, flow counter and router-wide bookkeeping,
 * then resolve its type. A route without an egress device is left
 * unresolved and reports success. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct rt6_info *rt)
{
	struct net_device *dev = rt->dst.dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = rt->rt6i_nh_weight;
	memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
	/* Best effort - nexthop works without a counter. */
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
}
4809
/* Tear down one IPv6 nexthop; exact reverse order of
 * mlxsw_sp_nexthop6_init().
 */
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
4817
Petr Machataf6050ee2017-09-02 23:49:21 +02004818static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4819 const struct rt6_info *rt)
4820{
Petr Machata8f28a302017-09-02 23:49:24 +02004821 return rt->rt6i_flags & RTF_GATEWAY ||
4822 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004823}
4824
/* Create a nexthop group covering every route of @fib6_entry. The group
 * struct and its nexthop array are one allocation (nexthops[] is a
 * trailing array sized by nrt6). On partial failure, already-initialized
 * nexthops are unwound in reverse. Returns the group or ERR_PTR().
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	size_t alloc_size;
	int i = 0;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 neighbours live in the ND table, not ARP. */
	nh_grp->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}

	/* Make the group discoverable so identical entries can share it. */
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
4875
/* Destroy an IPv6 nexthop group: unregister it from the group rhashtable,
 * tear down each nexthop in reverse order, release the adjacency entries
 * via a final refresh and free the group. The WARN_ON catches adjacency
 * entries that were not released as expected.
 */
static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
4892
/* Bind @fib6_entry to a nexthop group, reusing an existing identical
 * group when one is found in the rhashtable, otherwise creating a new
 * one. The entry is accounted on the group's fib_list, which acts as
 * the group's reference count. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}
4911
4912static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4913 struct mlxsw_sp_fib_entry *fib_entry)
4914{
4915 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4916
4917 list_del(&fib_entry->nexthop_group_node);
4918 if (!list_empty(&nh_grp->fib_list))
4919 return;
4920 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4921}
4922
/* Rebind @fib6_entry to a nexthop group matching its current route set
 * (after a route was added to or removed from the entry). The old group
 * is kept alive until the switch-over succeeds so that the device update
 * can be rolled back; it is destroyed only once unused. Returns 0 or a
 * negative errno, leaving the old binding intact on failure.
 */
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	/* Restore the binding to the old group. */
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}
4958
/* Append kernel route @rt to an existing multipath FIB6 entry and switch
 * the entry to a nexthop group that includes the new route. On failure
 * the route is unlinked and released again. Returns 0 or a negative
 * errno.
 */
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6))
		return PTR_ERR(mlxsw_sp_rt6);

	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6++;

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	return err;
}
4986
/* Remove kernel route @rt from a multipath FIB6 entry and rebuild the
 * entry's nexthop group without it. The group update's return value is
 * intentionally ignored - the route must be released regardless.
 */
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	/* Deleting a route we do not track indicates a bug. */
	mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
	if (WARN_ON(!mlxsw_sp_rt6))
		return;

	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
5003
/* Classify a FIB6 entry from its representative route. Checks are ordered
 * by precedence: local/anycast routes trap to the CPU, reject routes use
 * the lower-priority local action, gateway (or IPIP) routes are forwarded
 * remotely, and everything else is locally received.
 */
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct rt6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->rt6i_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}
5023
5024static void
5025mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5026{
5027 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5028
5029 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5030 list) {
5031 fib6_entry->nrt6--;
5032 list_del(&mlxsw_sp_rt6->list);
5033 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5034 }
5035}
5036
/* Create a FIB6 entry for @rt under @fib_node: wrap the route, classify
 * the entry and bind it to a nexthop group. Returns the new entry or
 * ERR_PTR() with everything unwound.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6)) {
		err = PTR_ERR(mlxsw_sp_rt6);
		goto err_rt6_create;
	}

	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);

	INIT_LIST_HEAD(&fib6_entry->rt6_list);
	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6 = 1;
	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
err_rt6_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}
5078
/* Destroy a FIB6 entry: release its nexthop group binding and all of its
 * kernel routes. The WARN_ON catches an inconsistent route count.
 */
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}
5087
/* Find the list position for new route @nrt inside @fib_node: the entry
 * @nrt should be inserted before. For a replace at the same metric,
 * prefer an entry of the same multipath capability; otherwise fall back
 * to a multipath entry at that metric. Relies on the list being ordered
 * by (table id descending, metric ascending). NULL means append at tail.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
			    mlxsw_sp_fib6_rt_can_mp(nrt))
				return fib6_entry;
			if (mlxsw_sp_fib6_rt_can_mp(nrt))
				fallback = fallback ?: fib6_entry;
		}
		if (rt->rt6i_metric > nrt->rt6i_metric)
			return fallback ?: fib6_entry;
	}

	return fallback;
}
5114
/* Insert @new6_entry into its node's entry list at the position dictated
 * by table id and metric. A replace must land before the entry it
 * replaces; not finding one then indicates a bug (-EINVAL). When no
 * successor exists, the entry is placed after the last entry of a
 * higher-priority (lower tb6_id wins here, list is descending) table,
 * or at the list head. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);

	if (replace && WARN_ON(!fib6_entry))
		return -EINVAL;

	if (fib6_entry) {
		/* Insert before the found successor. */
		list_add_tail(&new6_entry->common.list,
			      &fib6_entry->common.list);
	} else {
		struct mlxsw_sp_fib6_entry *last;

		/* Find the last entry belonging to a table with a higher
		 * (numerically larger) id, to insert right after it.
		 */
		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
				break;
			fib6_entry = last;
		}

		if (fib6_entry)
			list_add(&new6_entry->common.list,
				 &fib6_entry->common.list);
		else
			list_add(&new6_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}
5152
/* Remove a FIB6 entry from its node's ordered entry list. */
static void
mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	list_del(&fib6_entry->common.list);
}
5158
/* IPv6 counterpart of mlxsw_sp_fib4_node_entry_link(): insert the entry
 * into the node's list and program the device if it became the best
 * entry, rolling back the insertion on failure.
 */
static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib6_entry *fib6_entry,
					 bool replace)
{
	int err;

	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
	return err;
}
5179
/* Reverse of mlxsw_sp_fib6_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and from the node's list.
 */
static void
mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
}
5187
/* Locate the FIB6 entry tracking kernel route @rt: resolve its virtual
 * router and prefix node, then match on table id, metric and the route
 * pointer itself. Returns NULL when the route is not offloaded.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
					    sizeof(rt->rt6i_dst.addr),
					    rt->rt6i_dst.plen);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
		    rt->rt6i_metric == iter_rt->rt6i_metric &&
		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
			return fib6_entry;
	}

	return NULL;
}
5219
/* Finish an IPv6 route replace: destroy the entry that the newly linked
 * @fib6_entry superseded (inserted immediately before it). No-op unless
 * @replace is set.
 */
static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *replaced;

	if (!replace)
		return;

	replaced = list_next_entry(fib6_entry, common.list);

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
	/* Drop the node reference held by the replaced entry. */
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
5236
/* Handle an IPv6 route add/replace notification. Source-specific routes
 * are unsupported (-EINVAL); ignorable routes (link-local, multicast,
 * cloned) and aborted mode succeed silently. The route is appended to an
 * existing multipath entry when possible, otherwise a new entry is
 * created and linked, with the superseded entry destroyed on replace.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct rt6_info *rt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->rt6i_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
					 &rt->rt6i_dst.addr,
					 sizeof(rt->rt6i_dst.addr),
					 rt->rt6i_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* Before creating a new entry, try to append route to an existing
	 * multipath entry.
	 */
	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
	if (fib6_entry) {
		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
		if (err)
			goto err_fib6_entry_nexthop_add;
		return 0;
	}

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
	if (err)
		goto err_fib6_node_entry_link;

	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);

	return 0;

err_fib6_node_entry_link:
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
5293
/* Handle an IPv6 route delete notification. Routes that were never
 * offloaded (ignorable or aborted mode) are skipped. A route that is one
 * of several in a multipath entry only shrinks the entry's nexthop
 * group; the last route tears the whole entry down.
 */
static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (WARN_ON(!fib6_entry))
		return;

	/* If route is part of a multipath entry, but not the last one
	 * removed, then only reduce its nexthop group.
	 */
	if (!list_is_singular(&fib6_entry->rt6_list)) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
5324
/* Program the "abort" configuration for one routing protocol: allocate
 * an LPM tree of the given id, give it a single default (prefix length
 * 0) prefix, bind every virtual router to it and install a default route
 * whose action is ip2me, so all traffic is trapped to the CPU. Returns 0
 * or the first register-write errno.
 */
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_reg_ralxx_protocol proto,
					    u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* RALTA: allocate the LPM tree. */
	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* RALST: set the tree's prefix structure (root only). */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		/* RALTB: bind this virtual router to the tree. */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* RALUE: default route trapping everything to the CPU. */
		mlxsw_reg_ralue_pack(ralue_pl, proto,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
5365
/* Offload an IPv4 multicast (MFC) route. A no-op when the router is in
 * aborted state (multicast traps to CPU by default anyway).
 *
 * Takes a reference on the VR via mlxsw_sp_vr_get(); the matching put is
 * done in mlxsw_sp_router_fibmr_del(). NOTE(review): on mr_route4_add()
 * failure the VR reference appears to be kept — presumably cleaned up by
 * the subsequent fib-abort flush; confirm against callers.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
}
5381
/* Remove an offloaded IPv4 multicast (MFC) route and drop the VR
 * reference taken by the corresponding mlxsw_sp_router_fibmr_add().
 * WARNs if the VR cannot be found, since add/del should be paired.
 */
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
5397
/* Add a multicast VIF to the VR's multicast routing table. The RIF lookup
 * may yield NULL (device without a RIF); mlxsw_sp_mr_vif_add() accepts
 * that. Takes a VR reference, released in mlxsw_sp_router_fibmr_vif_del().
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}
5417
/* Remove a multicast VIF from the VR's multicast routing table and drop
 * the VR reference taken by mlxsw_sp_router_fibmr_vif_add().
 */
static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
5434
/* Install abort traps for both unicast protocols: IPv4 on LPM tree
 * MLXSW_SP_LPM_TREE_MIN and IPv6 on the next tree. Multicast needs no
 * trap (see comment below). Returns 0 or the first failing errno.
 */
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
	int err;

	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
					       MLXSW_SP_LPM_TREE_MIN);
	if (err)
		return err;

	/* The multicast router code does not need an abort trap as by default,
	 * packets that don't match any routes are trapped to the CPU.
	 */

	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
						MLXSW_SP_LPM_TREE_MIN + 1);
}
5453
/* Destroy every IPv4 entry hanging off @fib_node. The node itself may be
 * freed by mlxsw_sp_fib_node_put() once its entry list empties, so the
 * loop detects that case up front (do_break) and stops before the
 * list_for_each_entry_safe() iterator would touch freed memory.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;

	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
				 common.list) {
		/* True when fib4_entry is the last entry on the node. */
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
5474
/* IPv6 counterpart of mlxsw_sp_fib4_node_flush(): destroy every entry on
 * @fib_node, breaking out before the iterator touches the node after the
 * last put may have freed it.
 */
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;

	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
				 common.list) {
		/* Last entry: the node may be freed by the put below. */
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
5491
/* Dispatch a node flush to the protocol-specific handler based on the
 * protocol of the FIB the node belongs to.
 */
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}
5504
/* Flush every FIB node of protocol @proto in virtual router @vr. Same
 * do_break pattern as the node-level flushes: flushing the last node may
 * free the FIB (and its node_list head), so stop before the iterator
 * dereferences it again.
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		/* Last node: the FIB may be freed by the flush below. */
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
5520
/* Flush all offloaded routes from every in-use virtual router: multicast
 * table first, then IPv4, then IPv6. The VR "is used" state is re-checked
 * between the IPv4 and IPv6 flushes because the IPv4 flush may have
 * released the VR's last user, freeing it.
 */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_sp_mr_table_flush(vr->mr4_table);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}
5542
/* Give up on FIB offload entirely: flush all offloaded routes and install
 * default routes that trap everything to the CPU. Idempotent — a second
 * call while already aborted is a no-op. The aborted flag is set before
 * programming the trap so concurrent add paths bail out early.
 */
static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
5556
/* Deferred-work context for a FIB notifier event. The notifier runs in
 * atomic (RCU) context, so the event details are copied here and
 * processed later under RTNL by one of the *_event_work() handlers.
 * Exactly one union member is valid, selected by the event family and
 * type (see mlxsw_sp_router_fib{4,6,mr}_event()).
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib6_entry_notifier_info fen6_info; /* IPv6 route */
		struct fib_entry_notifier_info fen_info;   /* IPv4 route */
		struct fib_rule_notifier_info fr_info;     /* FIB rule */
		struct fib_nh_notifier_info fnh_info;      /* IPv4 nexthop */
		struct mfc_entry_notifier_info men_info;   /* IPv4 mcast route */
		struct vif_entry_notifier_info ven_info;   /* mcast VIF */
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event; /* FIB_EVENT_* that triggered this work */
};
5570
/* Process a deferred IPv4 FIB event under RTNL. Any offload failure
 * triggers a full FIB abort. Each case releases the reference that
 * mlxsw_sp_router_fib4_event() took when the work was queued, and the
 * work item itself is freed at the end.
 */
static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
					fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5613
/* Process a deferred IPv6 FIB event under RTNL. Mirrors the IPv4 work
 * handler: offload failure aborts FIB offload, and the rt6 reference
 * taken at queue time is released in each case.
 */
static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace;
	int err;

	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
					       fib_work->fen6_info.rt, replace);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5647
/* Process a deferred IPv4 multicast FIB event (route or VIF) under RTNL.
 * Offload failure aborts FIB offload. Each case drops the reference
 * (ipmr cache entry or netdev) taken at queue time.
 */
static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace;
	int err;

	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
						replace);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		ipmr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
		ipmr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_work->ven_info);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		dev_put(fib_work->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
					      &fib_work->ven_info);
		dev_put(fib_work->ven_info.dev);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5694
/* Copy IPv4 event data into the work item. Runs in the notifier's atomic
 * context, so only copies and refcount grabs happen here; the references
 * are released by mlxsw_sp_router_fib4_event_work().
 */
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_work->fen_info = *fen_info;
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_work->fnh_info = *fnh_info;
		/* Pin the parent fib_info for the same reason as above. */
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
}
5723
/* Copy IPv6 event data into the work item (atomic context). The rt6
 * reference taken here is released by mlxsw_sp_router_fib6_event_work().
 */
static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		fib_work->fen6_info = *fen6_info;
		/* Keep the route alive until the work item runs. */
		rt6_hold(fib_work->fen6_info.rt);
		break;
	}
}
5740
/* Copy multicast event data into the work item (atomic context). Grabs a
 * reference on the MFC cache entry or the VIF's netdev, released by
 * mlxsw_sp_router_fibmr_event_work().
 */
static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
			    struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
		ipmr_cache_hold(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD: /* fall through */
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
		dev_hold(fib_work->ven_info.dev);
		break;
	}
}
5759
/* Validate a FIB rule event. Returns 0 when the rule is supported (or
 * ignorable: rule deletion, or already aborted) and a negative value for
 * an unsupported rule. NOTE: the caller treats non-zero as "queue work
 * that will abort FIB offload" — see mlxsw_sp_router_fib_event().
 * Only default rules and l3mdev (VRF) rules can be offloaded.
 */
static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	if (mlxsw_sp->router->aborted)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -1;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -1;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -1;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");

	return err;
}
5799
/* FIB notifier callback. Called with rcu_read_lock(), hence the atomic
 * allocation below. Filters events to init_net and the supported address
 * families, short-circuits supported rule events, and otherwise snapshots
 * the event into a work item processed later under RTNL.
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	if (!net_eq(info->net, &init_net) ||
	    (info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		/* Supported rule: nothing to offload. Unsupported rule:
		 * fall through and queue work that will abort offload.
		 */
		if (!err)
			return NOTIFY_DONE;
	}

	/* Atomic context — cannot sleep for the allocation. */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (info->family) {
	case AF_INET:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
		mlxsw_sp_router_fib4_event(fib_work, info);
		break;
	case AF_INET6:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
		mlxsw_sp_router_fib6_event(fib_work, info);
		break;
	case RTNL_FAMILY_IPMR:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
		mlxsw_sp_router_fibmr_event(fib_work, info);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
5851
Ido Schimmel4724ba562017-03-10 08:53:39 +01005852static struct mlxsw_sp_rif *
5853mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5854 const struct net_device *dev)
5855{
5856 int i;
5857
5858 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005859 if (mlxsw_sp->router->rifs[i] &&
5860 mlxsw_sp->router->rifs[i]->dev == dev)
5861 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005862
5863 return NULL;
5864}
5865
/* Disable router interface @rif in hardware: read back its current RITR
 * configuration, clear the enable bit and write it back. WARNs once if
 * the query fails, since the RIF is expected to exist.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
5879
/* Synchronize router state with the disappearance of @rif: disable it in
 * hardware, then flush the nexthops and neighbour entries that were using
 * it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
5887
/* Decide whether a RIF needs to be (re)configured for @dev in response to
 * an inetaddr/inet6addr @event:
 *  - NETDEV_UP: configure only if no RIF exists yet for the device;
 *  - NETDEV_DOWN: tear down only if a RIF exists, the device has no
 *    remaining IPv4 or IPv6 addresses, and it is not an l3mdev (VRF)
 *    slave.
 * Returns true when the caller should act on the event.
 */
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		/* Check for any remaining IPv4 address... */
		idev = __in_dev_get_rtnl(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		/* ...and any remaining IPv6 address. */
		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}
5921
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005922static enum mlxsw_sp_rif_type
5923mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5924 const struct net_device *dev)
5925{
5926 enum mlxsw_sp_fid_type type;
5927
Petr Machata6ddb7422017-09-02 23:49:19 +02005928 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5929 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5930
5931 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005932 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5933 type = MLXSW_SP_FID_TYPE_8021Q;
5934 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5935 type = MLXSW_SP_FID_TYPE_8021Q;
5936 else if (netif_is_bridge_master(dev))
5937 type = MLXSW_SP_FID_TYPE_8021D;
5938 else
5939 type = MLXSW_SP_FID_TYPE_RFID;
5940
5941 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5942}
5943
Ido Schimmelde5ed992017-06-04 16:53:40 +02005944static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005945{
5946 int i;
5947
Ido Schimmelde5ed992017-06-04 16:53:40 +02005948 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5949 if (!mlxsw_sp->router->rifs[i]) {
5950 *p_rif_index = i;
5951 return 0;
5952 }
5953 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005954
Ido Schimmelde5ed992017-06-04 16:53:40 +02005955 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005956}
5957
/* Allocate and initialize the common part of a RIF. @rif_size is the
 * type-specific size (>= sizeof(struct mlxsw_sp_rif)), allowing derived
 * RIF types to embed extra state after the common struct. The RIF's MAC
 * and MTU are snapshotted from @l3_dev. Returns NULL on allocation
 * failure.
 */
static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;

	return rif;
}
5978
/* Look up a RIF by its index; may return NULL for an unused slot. */
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}
5984
/* Accessor: hardware index of @rif. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
5989
/* Accessor: hardware RIF index of an IP-in-IP loopback RIF. */
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}
5994
/* Accessor: underlay virtual-router ID of an IP-in-IP loopback RIF. */
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_vr_id;
}
5999
/* Accessor: ifindex of the netdevice backing @rif. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
6004
/* Accessor: netdevice backing @rif. */
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}
6009
/* Create a router interface for params->dev.
 *
 * Steps, unwound in reverse on failure:
 *  1. Resolve the RIF type and its ops from the device.
 *  2. Get (or create) the VR for the device's FIB table and bump its
 *     RIF count.
 *  3. Allocate a free RIF index and the RIF object itself.
 *  4. Get a FID (types without fid_get — loopback RIFs — skip this).
 *  5. Run type-specific setup and program the RIF in hardware
 *     (ops->configure).
 *  6. Register with the multicast router and allocate counters.
 *
 * Returns the new RIF, or an ERR_PTR. @extack carries user-visible error
 * messages back through the notifier chain.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	/* Devices without an l3mdev table use the main table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	if (ops->fid_get) {
		fid = ops->fid_get(rif);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
	if (err)
		goto err_mr_rif_add;

	mlxsw_sp_rif_counters_alloc(rif);
	/* Publish only after the RIF is fully configured. */
	mlxsw_sp->router->rifs[rif_index] = rif;

	return rif;

err_mr_rif_add:
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
6084
/* Destroy @rif, reversing mlxsw_sp_rif_create(): flush users (nexthops,
 * neighbours) and disable the RIF in hardware, unpublish it, free its
 * counters, unregister from the multicast router, run the type-specific
 * teardown, release the FID (if any), and finally drop the VR reference.
 */
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_rif_counters_free(rif);
	mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
6106
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006107static void
6108mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6109 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6110{
6111 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6112
6113 params->vid = mlxsw_sp_port_vlan->vid;
6114 params->lag = mlxsw_sp_port->lagged;
6115 if (params->lag)
6116 params->lag_id = mlxsw_sp_port->lag_id;
6117 else
6118 params->system_port = mlxsw_sp_port->local_port;
6119}
6120
/* Join a {port, VID} to the router: ensure a sub-port RIF exists for the L3
 * netdevice, map the port-VLAN to the RIF's FID, and move the VID to a
 * forwarding, non-learning state. Returns 0 or a negative errno; on error,
 * all intermediate steps are unwound via the goto chain below.
 */
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		/* No RIF for this netdev yet - create a sub-port RIF bound
		 * to this {port, VID}.
		 */
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	/* Router ports do not learn: disable learning on the VID. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}
6172
/* Reverse of mlxsw_sp_port_vlan_router_join(): detach the {port, VID} from
 * its rFID, restore learning/STP state, and drop the FID reference. Only
 * valid for port-VLANs mapped to a router FID (rFID).
 */
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}
6192
/* Handle an inet address event for an L3 device backed by a {port, VID}:
 * NETDEV_UP joins the port-VLAN to the router, NETDEV_DOWN leaves it.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* The port-VLAN is expected to exist by the time addresses are
	 * configured on top of it.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev, extack);
	case NETDEV_DOWN:
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}
6216
6217static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006218 unsigned long event,
6219 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006220{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006221 if (netif_is_bridge_port(port_dev) ||
6222 netif_is_lag_port(port_dev) ||
6223 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006224 return 0;
6225
David Ahernf8fa9b42017-10-18 09:56:56 -07006226 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6227 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006228}
6229
6230static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6231 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006232 unsigned long event, u16 vid,
6233 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006234{
6235 struct net_device *port_dev;
6236 struct list_head *iter;
6237 int err;
6238
6239 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6240 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006241 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6242 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006243 event, vid,
6244 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006245 if (err)
6246 return err;
6247 }
6248 }
6249
6250 return 0;
6251}
6252
/* Handle an inet address event on a LAG master. A LAG enslaved to a bridge
 * is handled via the bridge device, so it is ignored here.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (!netif_is_bridge_port(lag_dev))
		return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
						     1, extack);

	return 0;
}
6263
/* Handle an inet address event on a bridge (or VLAN atop a bridge) device:
 * create a RIF for the device on NETDEV_UP and destroy it on NETDEV_DOWN.
 * Callers guard with mlxsw_sp_rif_should_config(), so on NETDEV_DOWN the
 * RIF lookup is expected to succeed.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}
6288
/* Handle an inet address event on a VLAN device by dispatching on the kind
 * of real (lower) device it sits on: mlxsw port, LAG master, or VLAN-aware
 * bridge. Other real devices are ignored.
 */
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	/* A VLAN device enslaved to a bridge is handled via the bridge. */
	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);

	return 0;
}
6310
/* Common inet address event handler: dispatch to the handler matching the
 * kind of netdevice the address was configured on. Unrecognized devices
 * are silently ignored.
 */
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
	if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
	return 0;
}
6326
/* IPv4 inetaddr notifier callback. Only tears down RIFs (NETDEV_DOWN);
 * NETDEV_UP is handled by the address validator notifier below so that
 * failures can veto the address addition.
 */
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		goto out;

	/* Ignore devices not related to this mlxsw instance. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	return notifier_from_errno(err);
}
6352
/* IPv4 address validator notifier callback: handles NETDEV_UP so a RIF
 * creation failure can be reported (with extack) before the address is
 * actually installed.
 */
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
out:
	return notifier_from_errno(err);
}
6374
/* Deferred-work context for IPv6 address notifications; the notifier runs
 * in atomic context, so the actual processing is punted to a workqueue.
 */
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct net_device *dev;	/* held via dev_hold() until the work runs */
	unsigned long event;	/* netdev event being processed */
};
6380
/* Process a deferred IPv6 address event under RTNL, then release the device
 * reference and the work item. Mirrors mlxsw_sp_inetaddr_event() logic.
 */
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	rtnl_unlock();
	/* Drop the reference taken when the work was queued. */
	dev_put(dev);
	kfree(inet6addr_work);
}
6405
/* Called with rcu_read_lock() */
/* IPv6 inet6addr notifier callback. Runs in atomic context, so it only
 * queues a work item (GFP_ATOMIC) holding a reference on the device;
 * NETDEV_UP is handled by the validator notifier instead.
 */
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	/* Ignore devices with no mlxsw port beneath them. */
	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	/* Reference is dropped by the work function. */
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}
6433
/* IPv6 address validator notifier callback: handles NETDEV_UP synchronously
 * so a RIF creation failure can veto the address (with extack reporting).
 */
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
out:
	return notifier_from_errno(err);
}
6455
/* Update the MAC address and MTU of an existing RIF via the RITR register:
 * read-modify-write of the current entry. Returns 0 or a negative errno.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Fetch the current RITR entry so unrelated fields are preserved. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6472
/* React to MAC / MTU changes on a netdevice that has a RIF: remove the old
 * router FDB entry, edit the RIF in hardware, install the new FDB entry,
 * and propagate an MTU change to the multicast router. On failure the
 * previous state is restored via the unwind labels.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
	}

	/* Commit the new values to the software RIF. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}
6526
/* Move an L3 netdevice into a VRF: recreate its RIF (if any) so it is bound
 * to the new virtual router. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}
6542
Ido Schimmelb1e45522017-04-30 19:47:14 +03006543static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6544 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006545{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006546 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006547
Ido Schimmelb1e45522017-04-30 19:47:14 +03006548 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6549 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006550 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006551 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006552}
6553
/* Handle CHANGEUPPER events where the upper device is a VRF: join on
 * linking, leave on unlinking. Returns 0 or a negative errno.
 */
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* Nothing to validate before the change. */
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}
6580
/* Downcast a generic RIF to its enclosing sub-port RIF representation. */
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}
6586
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006587static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6588 const struct mlxsw_sp_rif_params *params)
6589{
6590 struct mlxsw_sp_rif_subport *rif_subport;
6591
6592 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6593 rif_subport->vid = params->vid;
6594 rif_subport->lag = params->lag;
6595 if (params->lag)
6596 rif_subport->lag_id = params->lag_id;
6597 else
6598 rif_subport->system_port = params->system_port;
6599}
6600
/* Write (enable) or invalidate (disable) a sub-port RIF entry in the RITR
 * register, including its MAC, MTU and {LAG|port, VID} binding.
 */
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6618
/* Configure a sub-port RIF in hardware: program the RITR entry, install the
 * router FDB entry for the RIF's MAC, and bind the FID to the RIF. Unwinds
 * on failure.
 */
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}
6639
/* Reverse of mlxsw_sp_rif_subport_configure(): unbind the FID, remove the
 * router FDB entry, and invalidate the RITR entry.
 */
static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_subport_op(rif, false);
}
6649
/* Get (take a reference on) the router FID (rFID) keyed by this RIF's
 * index; sub-port RIFs always use rFIDs.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
6655
/* RIF ops for sub-port RIFs: router interfaces bound to a {port, VID} or
 * {LAG, VID} pair.
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};
6664
/* Write (enable) or invalidate (disable) a VLAN- or FID-based RIF entry in
 * the RITR register. @type selects VLAN vs. FID interface; @vid_fid is the
 * VID or FID index accordingly.
 */
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6679
/* Local port number used as the flood destination towards the router block:
 * one past the highest physical port of the device.
 */
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
6684
/* Configure a VLAN RIF: program the RITR VLAN entry, flood MC/BC traffic on
 * the FID to the router port, install the router FDB entry, and bind the
 * FID to the RIF. Unwinds in reverse order on failure.
 */
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}
6723
/* Reverse of mlxsw_sp_rif_vlan_configure(): unbind the FID, remove the
 * router FDB entry, stop flooding MC/BC to the router port, and invalidate
 * the RITR VLAN entry.
 */
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}
6739
6740static struct mlxsw_sp_fid *
6741mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6742{
6743 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6744
6745 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6746}
6747
/* RIF ops for VLAN RIFs (no per-type state, so no .setup hook). */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};
6755
/* Configure a FID RIF: program the RITR FID entry, flood MC/BC traffic on
 * the FID to the router port, install the router FDB entry, and bind the
 * FID to the RIF. Unwinds in reverse order on failure.
 */
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}
6795
/* Tear down a FID-backed RIF: exact mirror image of
 * mlxsw_sp_rif_fid_configure(), executed in reverse order.
 */
static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}
6811
6812static struct mlxsw_sp_fid *
6813mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6814{
6815 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6816}
6817
/* Ops for FID router interfaces. The FID comes from the 802.1D FID
 * table keyed on ifindex (see mlxsw_sp_rif_fid_fid_get); no
 * type-specific RIF state is needed.
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};
6825
Petr Machata6ddb7422017-09-02 23:49:19 +02006826static struct mlxsw_sp_rif_ipip_lb *
6827mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6828{
6829 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6830}
6831
6832static void
6833mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6834 const struct mlxsw_sp_rif_params *params)
6835{
6836 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6837 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6838
6839 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6840 common);
6841 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6842 rif_lb->lb_config = params_lb->lb_config;
6843}
6844
/* Program (or unprogram, per @enable) the RITR register for an IPinIP
 * loopback RIF bound to underlay VR @ul_vr. Only an IPv4 underlay is
 * implemented; an IPv6 underlay yields -EAFNOSUPPORT.
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
			struct mlxsw_sp_vr *ul_vr, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr->id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6871
/* Configure an IPinIP loopback RIF: resolve the underlay table to a
 * virtual router (taking a reference on it) and program the loopback
 * into hardware. The VR reference is dropped on failure; on success it
 * is kept until mlxsw_sp_rif_ipip_lb_deconfigure().
 *
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	/* Remember the underlay VR by ID so deconfigure can find it. */
	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}
6897
/* Tear down an IPinIP loopback RIF: unprogram it from hardware and
 * release the underlay-VR reference taken at configure time.
 */
static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	/* Look the underlay VR up by the ID recorded at configure time. */
	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
6910
/* Ops for IPinIP loopback router interfaces. rif_size covers the
 * larger mlxsw_sp_rif_ipip_lb container; .setup copies the loopback
 * config in before .configure programs the hardware. No .fid_get --
 * loopback RIFs are not backed by a FID.
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp_rif_ipip_lb_deconfigure,
};
6918
/* RIF ops dispatch table, indexed by enum mlxsw_sp_rif_type. */
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp_rif_ipip_lb_ops,
};
6925
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006926static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6927{
6928 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6929
6930 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6931 sizeof(struct mlxsw_sp_rif *),
6932 GFP_KERNEL);
6933 if (!mlxsw_sp->router->rifs)
6934 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006935
6936 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6937
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006938 return 0;
6939}
6940
6941static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6942{
6943 int i;
6944
6945 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6946 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6947
6948 kfree(mlxsw_sp->router->rifs);
6949}
6950
Petr Machatadcbda282017-10-20 09:16:16 +02006951static int
6952mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6953{
6954 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6955
6956 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6957 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6958}
6959
Petr Machata38ebc0f2017-09-02 23:49:17 +02006960static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6961{
6962 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006963 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006964 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006965}
6966
6967static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6968{
Petr Machata1012b9a2017-09-02 23:49:23 +02006969 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006970}
6971
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006972static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6973{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006974 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006975
6976 /* Flush pending FIB notifications and then flush the device's
6977 * table before requesting another dump. The FIB notification
6978 * block is unregistered, so no need to take RTNL.
6979 */
6980 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006981 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6982 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006983}
6984
Ido Schimmelaf658b62017-11-02 17:14:09 +01006985#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Enable hashing on one outer-header type in the RECR2 payload. */
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}
6990
/* Enable hashing on one outer-header field in the RECR2 payload. */
static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}
6995
6996static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6997{
6998 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6999
7000 mlxsw_sp_mp_hash_header_set(recr2_pl,
7001 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
7002 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
7003 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
7004 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
7005 if (only_l3)
7006 return;
7007 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
7008 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
7009 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
7010 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
7011}
7012
/* Configure the IPv6 part of the ECMP hash in the RECR2 payload:
 * source/destination IP, flow label and next header, for both TCP/UDP
 * and non-TCP/UDP packets.
 */
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}
7023
7024static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7025{
7026 char recr2_pl[MLXSW_REG_RECR2_LEN];
7027 u32 seed;
7028
7029 get_random_bytes(&seed, sizeof(seed));
7030 mlxsw_reg_recr2_pack(recr2_pl, seed);
7031 mlxsw_sp_mp4_hash_init(recr2_pl);
7032 mlxsw_sp_mp6_hash_init(recr2_pl);
7033
7034 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
7035}
7036#else
/* Kernel built without CONFIG_IP_ROUTE_MULTIPATH: nothing to set up. */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
7041#endif
7042
Yuval Mintz48276a22018-01-14 12:33:14 +01007043static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
7044{
7045 char rdpm_pl[MLXSW_REG_RDPM_LEN];
7046 unsigned int i;
7047
7048 MLXSW_REG_ZERO(rdpm, rdpm_pl);
7049
7050 /* HW is determining switch priority based on DSCP-bits, but the
7051 * kernel is still doing that based on the ToS. Since there's a
7052 * mismatch in bits we need to make sure to translate the right
7053 * value ToS would observe, skipping the 2 least-significant ECN bits.
7054 */
7055 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
7056 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
7057
7058 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
7059}
7060
Ido Schimmel4724ba562017-03-10 08:53:39 +01007061static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7062{
7063 char rgcr_pl[MLXSW_REG_RGCR_LEN];
7064 u64 max_rifs;
7065 int err;
7066
7067 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
7068 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007069 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007070
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007071 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007072 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
Yuval Mintz48276a22018-01-14 12:33:14 +01007073 mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007074 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
7075 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007076 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007077 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007078}
7079
/* Disable routing in hardware (RGCR with both enables cleared -- the
 * mirror of __mlxsw_sp_router_init). The write status is ignored:
 * there is nothing useful to do about a failure during teardown.
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
7087
/* Bring up the router subsystem: allocate the router object, then
 * initialize its sub-blocks in dependency order (HW router enable,
 * RIFs, IPinIP, nexthop hashtables, LPM, multicast routing, VRs,
 * neighbour handling, netevent notifier, ECMP hash, DSCP map) and
 * finally register the FIB notifier that feeds kernel routes to the
 * device. The error ladder unwinds in exact reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	/* Registered last so no FIB events arrive before the rest of the
	 * router is ready to handle them.
	 */
	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}
7187
/* Tear down the router subsystem in exact reverse order of
 * mlxsw_sp_router_init(). The FIB notifier is unregistered first so no
 * new route events arrive while sub-blocks are being dismantled.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}