/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

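/* Per-instance router state: the RIF and virtual router arrays, the
 * neighbour and nexthop hashtables, LPM tree bookkeeping and the list of
 * offloaded IPIP tunnels. Reached through mlxsw_sp->router.
 */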
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

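/* RIF counters. Each RIF can have an ingress and an egress packet counter;
 * the helpers below map a direction to the counter index and validity
 * fields of the RIF and program the counter through the RITR and RICNT
 * registers.
 */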
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

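/* Set of prefix lengths (0 through 128) that are in use, used when laying
 * out the bins of an LPM tree.
 */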
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

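/* A hardware LPM tree. Trees are reference counted and can be shared by
 * multiple FIBs of the same protocol.
 */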
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

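/* A FIB is instantiated per virtual router and protocol. It starts out
 * bound to the protocol's default LPM tree and takes a reference on it.
 */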
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

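/* Bind an unused virtual router to a kernel table ID and create its IPv4
 * and IPv6 FIBs and its IPv4 multicast routing table.
 */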
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr_table_create;
	}
	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr4_table = mr4_table;
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

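/* Replace the LPM tree used for a protocol: rebind every virtual router
 * that uses the old tree, carry over the per-prefix reference counts and
 * make the new tree the protocol's default.
 */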
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

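/* Allocate the virtual router array according to the device's MAX_VRS
 * resource.
 */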
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

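/* Create an IPIP entry for an offloadable tunnel netdevice and add it to
 * the router's list of offloaded tunnels.
 */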
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

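/* Program the loopback RIF that backs an IPIP tunnel through the RITR
 * register. Only an IPv4 underlay is currently supported.
 */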
Petr Machata22b990582018-03-22 19:53:34 +02001383static int
1384mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
1385 struct mlxsw_sp_vr *ul_vr, bool enable)
1386{
1387 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1388 struct mlxsw_sp_rif *rif = &lb_rif->common;
1389 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1390 char ritr_pl[MLXSW_REG_RITR_LEN];
1391 u32 saddr4;
1392
1393 switch (lb_cf.ul_protocol) {
1394 case MLXSW_SP_L3_PROTO_IPV4:
1395 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1396 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1397 rif->rif_index, rif->vr_id, rif->dev->mtu);
1398 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1399 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1400 ul_vr->id, saddr4, lb_cf.okey);
1401 break;
1402
1403 case MLXSW_SP_L3_PROTO_IPV6:
1404 return -EAFNOSUPPORT;
1405 }
1406
1407 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1408}
1409
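/* NETDEV_CHANGEMTU handler: rewrite the loopback RIF so the device picks up
 * the new MTU of the overlay netdevice, then cache the MTU on the RIF.
 */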
Petr Machata68c3cd92018-03-22 19:53:35 +02001410static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1411 struct net_device *ol_dev)
1412{
1413 struct mlxsw_sp_ipip_entry *ipip_entry;
1414 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1415 struct mlxsw_sp_vr *ul_vr;
1416 int err = 0;
1417
1418 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1419 if (ipip_entry) {
1420 lb_rif = ipip_entry->ol_lb;
1421 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1422 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1423 if (err)
1424 goto out;
1425 lb_rif->common.mtu = ol_dev->mtu;
1426 }
1427
1428out:
1429 return err;
1430}
1431
Petr Machata6d4de442017-11-03 10:03:34 +01001432static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1433 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001434{
Petr Machata00635872017-10-16 16:26:37 +02001435 struct mlxsw_sp_ipip_entry *ipip_entry;
1436
1437 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001438 if (ipip_entry)
1439 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001440}
1441
Petr Machataa3fe1982017-11-03 10:03:33 +01001442static void
1443mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1444 struct mlxsw_sp_ipip_entry *ipip_entry)
1445{
1446 if (ipip_entry->decap_fib_entry)
1447 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1448}
1449
Petr Machata796ec772017-11-03 10:03:29 +01001450static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1451 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001452{
1453 struct mlxsw_sp_ipip_entry *ipip_entry;
1454
1455 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001456 if (ipip_entry)
1457 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001458}
1459
Petr Machata09dbf622017-11-28 13:17:14 +01001460static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1461 struct mlxsw_sp_rif *old_rif,
1462 struct mlxsw_sp_rif *new_rif);
Petr Machata65a61212017-11-03 10:03:37 +01001463static int
1464mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1465 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001466 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001467 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001468{
Petr Machata65a61212017-11-03 10:03:37 +01001469 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1470 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001471
Petr Machata65a61212017-11-03 10:03:37 +01001472 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1473 ipip_entry->ipipt,
1474 ipip_entry->ol_dev,
1475 extack);
1476 if (IS_ERR(new_lb_rif))
1477 return PTR_ERR(new_lb_rif);
1478 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001479
Petr Machata09dbf622017-11-28 13:17:14 +01001480 if (keep_encap)
1481 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1482 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001483
Petr Machata65a61212017-11-03 10:03:37 +01001484 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001485
Petr Machata65a61212017-11-03 10:03:37 +01001486 return 0;
1487}
1488
Petr Machata09dbf622017-11-28 13:17:14 +01001489static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1490 struct mlxsw_sp_rif *rif);
1491
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001492/**
1493 * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload of an IPIP entry.
1494 * The decap route is always refreshed; the flags select what else is done:
1495 * @recreate_loopback: recreate the associated loopback RIF
1496 * @keep_encap: update next hops that use the tunnel netdevice. Only
1497 * relevant when recreate_loopback is true.
1498 * @update_nexthops: update next hops, keeping the current loopback RIF. Only
1499 * relevant when recreate_loopback is false.
1500 */
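/* Flag combinations used by the callers below:
 *   overlay moved to a different VRF:  recreate_loopback, !keep_encap
 *   underlay moved to a different VRF: recreate_loopback, keep_encap
 *   underlay went up or down:          update_nexthops only
 */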
Petr Machata65a61212017-11-03 10:03:37 +01001501int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1502 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001503 bool recreate_loopback,
1504 bool keep_encap,
1505 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001506 struct netlink_ext_ack *extack)
1507{
1508 int err;
1509
1510	/* RIFs can't be edited, so to update the loopback RIF, we need to destroy
1511	 * and recreate it. That creates a window of opportunity where RALUE and
1512	 * RATR registers end up referencing a RIF that's already gone. RATRs are
1513	 * handled in mlxsw_sp_ipip_entry_ol_lb_update(); to take care of RALUE,
Petr Machataf63ce4e2017-10-16 16:26:38 +02001514	 * demote the decap route first.
1515	 */
1516 if (ipip_entry->decap_fib_entry)
1517 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1518
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001519 if (recreate_loopback) {
1520 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1521 keep_encap, extack);
1522 if (err)
1523 return err;
1524 } else if (update_nexthops) {
1525 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1526 &ipip_entry->ol_lb->common);
1527 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001528
Petr Machata65a61212017-11-03 10:03:37 +01001529 if (ipip_entry->ol_dev->flags & IFF_UP)
1530 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001531
1532 return 0;
1533}
1534
Petr Machata65a61212017-11-03 10:03:37 +01001535static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1536 struct net_device *ol_dev,
1537 struct netlink_ext_ack *extack)
1538{
1539 struct mlxsw_sp_ipip_entry *ipip_entry =
1540 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001541 enum mlxsw_sp_l3proto ul_proto;
1542 union mlxsw_sp_l3addr saddr;
1543 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001544
1545 if (!ipip_entry)
1546 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001547
1548	/* In flat configurations, moving the overlay to a different VRF might
1549	 * cause a local address conflict, in which case the conflicting tunnels
1550	 * need to be demoted.
1551	 */
1552 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1553 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1554 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1555 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1556 saddr, ul_tb_id,
1557 ipip_entry)) {
1558 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1559 return 0;
1560 }
1561
Petr Machata65a61212017-11-03 10:03:37 +01001562 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001563 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001564}
1565
Petr Machata61481f22017-11-03 10:03:41 +01001566static int
1567mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1568 struct mlxsw_sp_ipip_entry *ipip_entry,
1569 struct net_device *ul_dev,
1570 struct netlink_ext_ack *extack)
1571{
1572 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1573 true, true, false, extack);
1574}
1575
Petr Machata4cf04f32017-11-03 10:03:42 +01001576static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001577mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1578 struct mlxsw_sp_ipip_entry *ipip_entry,
1579 struct net_device *ul_dev)
1580{
1581 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1582 false, false, true, NULL);
1583}
1584
1585static int
1586mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1587 struct mlxsw_sp_ipip_entry *ipip_entry,
1588 struct net_device *ul_dev)
1589{
1590 /* A down underlay device causes encapsulated packets to not be
1591 * forwarded, but decap still works. So refresh next hops without
1592 * touching anything else.
1593 */
1594 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1595 false, false, true, NULL);
1596}
1597
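/* NETDEV_CHANGE handler for the overlay device: demote the tunnel if the
 * change made it ineligible for offloading, otherwise let the tunnel-type
 * specific ops react to the new configuration.
 */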
1598static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001599mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1600 struct net_device *ol_dev,
1601 struct netlink_ext_ack *extack)
1602{
1603 const struct mlxsw_sp_ipip_ops *ipip_ops;
1604 struct mlxsw_sp_ipip_entry *ipip_entry;
1605 int err;
1606
1607 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1608 if (!ipip_entry)
1609		/* A change might make a tunnel eligible for offloading, but
1610		 * that is currently not implemented. What falls to the slow
1611		 * path stays there.
1612		 */
1613 return 0;
1614
1615 /* A change might make a tunnel not eligible for offloading. */
1616 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1617 ipip_entry->ipipt)) {
1618 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1619 return 0;
1620 }
1621
1622 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1623 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1624 return err;
1625}
1626
Petr Machataaf641712017-11-03 10:03:40 +01001627void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1628 struct mlxsw_sp_ipip_entry *ipip_entry)
1629{
1630 struct net_device *ol_dev = ipip_entry->ol_dev;
1631
1632 if (ol_dev->flags & IFF_UP)
1633 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1634 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1635}
1636
1637/* A configuration in which several tunnels have the same local address in the
1638 * same underlay table needs special treatment in the HW, which is currently
1639 * not implemented in the driver. This function finds and demotes the first
1640 * tunnel with the given source address, except for the one passed in the
1641 * `except' argument.
1642 */
1643bool
1644mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1645 enum mlxsw_sp_l3proto ul_proto,
1646 union mlxsw_sp_l3addr saddr,
1647 u32 ul_tb_id,
1648 const struct mlxsw_sp_ipip_entry *except)
1649{
1650 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1651
1652 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1653 ipip_list_node) {
1654 if (ipip_entry != except &&
1655 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1656 ul_tb_id, ipip_entry)) {
1657 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1658 return true;
1659 }
1660 }
1661
1662 return false;
1663}
1664
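/* Demote every offloaded tunnel whose underlay device is ul_dev. Used when
 * an event on the underlay device could not be handled gracefully.
 */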
Petr Machata61481f22017-11-03 10:03:41 +01001665static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1666 struct net_device *ul_dev)
1667{
1668 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1669
1670 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1671 ipip_list_node) {
1672 struct net_device *ipip_ul_dev =
1673 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1674
1675 if (ipip_ul_dev == ul_dev)
1676 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1677 }
1678}
1679
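/* Dispatch netdevice events on the overlay (tunnel) device to the handlers
 * above.
 */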
Petr Machata7e75af62017-11-03 10:03:36 +01001680int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1681 struct net_device *ol_dev,
1682 unsigned long event,
1683 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001684{
Petr Machata7e75af62017-11-03 10:03:36 +01001685 struct netdev_notifier_changeupper_info *chup;
1686 struct netlink_ext_ack *extack;
1687
Petr Machata00635872017-10-16 16:26:37 +02001688 switch (event) {
1689 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001690 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001691 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001692 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001693 return 0;
1694 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001695 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1696 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001697 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001698 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001699 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001700 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001701 chup = container_of(info, typeof(*chup), info);
1702 extack = info->extack;
1703 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001704 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001705 ol_dev,
1706 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001707 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001708 case NETDEV_CHANGE:
1709 extack = info->extack;
1710 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1711 ol_dev, extack);
Petr Machata68c3cd92018-03-22 19:53:35 +02001712 case NETDEV_CHANGEMTU:
1713 return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001714 }
1715 return 0;
1716}
1717
Petr Machata61481f22017-11-03 10:03:41 +01001718static int
1719__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1720 struct mlxsw_sp_ipip_entry *ipip_entry,
1721 struct net_device *ul_dev,
1722 unsigned long event,
1723 struct netdev_notifier_info *info)
1724{
1725 struct netdev_notifier_changeupper_info *chup;
1726 struct netlink_ext_ack *extack;
1727
1728 switch (event) {
1729 case NETDEV_CHANGEUPPER:
1730 chup = container_of(info, typeof(*chup), info);
1731 extack = info->extack;
1732 if (netif_is_l3_master(chup->upper_dev))
1733 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1734 ipip_entry,
1735 ul_dev,
1736 extack);
1737 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001738
1739 case NETDEV_UP:
1740 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1741 ul_dev);
1742 case NETDEV_DOWN:
1743 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1744 ipip_entry,
1745 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001746 }
1747 return 0;
1748}
1749
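/* Dispatch netdevice events on an underlay device to every IPIP entry that
 * uses it. If handling fails for one entry, all tunnels over this underlay
 * device are demoted so that hardware and kernel state stay consistent.
 */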
1750int
1751mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1752 struct net_device *ul_dev,
1753 unsigned long event,
1754 struct netdev_notifier_info *info)
1755{
1756 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1757 int err;
1758
1759 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1760 ul_dev,
1761 ipip_entry))) {
1762 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1763 ul_dev, event, info);
1764 if (err) {
1765 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1766 ul_dev);
1767 return err;
1768 }
1769 }
1770
1771 return 0;
1772}
1773
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001774struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001775 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001776};
1777
1778struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001779 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001780 struct rhash_head ht_node;
1781 struct mlxsw_sp_neigh_key key;
1782 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001783 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001784 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001785 struct list_head nexthop_list; /* list of nexthops using
1786 * this neigh entry
1787 */
Yotam Gigib2157142016-07-05 11:27:51 +02001788 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001789 unsigned int counter_index;
1790 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001791};
1792
1793static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1794 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1795 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1796 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1797};
1798
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001799struct mlxsw_sp_neigh_entry *
1800mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1801 struct mlxsw_sp_neigh_entry *neigh_entry)
1802{
1803 if (!neigh_entry) {
1804 if (list_empty(&rif->neigh_list))
1805 return NULL;
1806 else
1807 return list_first_entry(&rif->neigh_list,
1808 typeof(*neigh_entry),
1809 rif_list_node);
1810 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001811 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001812 return NULL;
1813 return list_next_entry(neigh_entry, rif_list_node);
1814}
1815
1816int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1817{
1818 return neigh_entry->key.n->tbl->family;
1819}
1820
1821unsigned char *
1822mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1823{
1824 return neigh_entry->ha;
1825}
1826
1827u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1828{
1829 struct neighbour *n;
1830
1831 n = neigh_entry->key.n;
1832 return ntohl(*((__be32 *) n->primary_key));
1833}
1834
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001835struct in6_addr *
1836mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1837{
1838 struct neighbour *n;
1839
1840 n = neigh_entry->key.n;
1841 return (struct in6_addr *) &n->primary_key;
1842}
1843
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001844int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1845 struct mlxsw_sp_neigh_entry *neigh_entry,
1846 u64 *p_counter)
1847{
1848 if (!neigh_entry->counter_valid)
1849 return -EINVAL;
1850
1851 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1852 p_counter, NULL);
1853}
1854
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001855static struct mlxsw_sp_neigh_entry *
1856mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1857 u16 rif)
1858{
1859 struct mlxsw_sp_neigh_entry *neigh_entry;
1860
1861 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1862 if (!neigh_entry)
1863 return NULL;
1864
1865 neigh_entry->key.n = n;
1866 neigh_entry->rif = rif;
1867 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1868
1869 return neigh_entry;
1870}
1871
1872static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1873{
1874 kfree(neigh_entry);
1875}
1876
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001877static int
1878mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1879 struct mlxsw_sp_neigh_entry *neigh_entry)
1880{
Ido Schimmel9011b672017-05-16 19:38:25 +02001881 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001882 &neigh_entry->ht_node,
1883 mlxsw_sp_neigh_ht_params);
1884}
1885
1886static void
1887mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1888 struct mlxsw_sp_neigh_entry *neigh_entry)
1889{
Ido Schimmel9011b672017-05-16 19:38:25 +02001890 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001891 &neigh_entry->ht_node,
1892 mlxsw_sp_neigh_ht_params);
1893}
1894
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001895static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001896mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1897 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001898{
1899 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001900 const char *table_name;
1901
1902 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1903 case AF_INET:
1904 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1905 break;
1906 case AF_INET6:
1907 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1908 break;
1909 default:
1910 WARN_ON(1);
1911 return false;
1912 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001913
1914 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001915 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001916}
1917
1918static void
1919mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1920 struct mlxsw_sp_neigh_entry *neigh_entry)
1921{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001922 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001923 return;
1924
1925 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1926 return;
1927
1928 neigh_entry->counter_valid = true;
1929}
1930
1931static void
1932mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1933 struct mlxsw_sp_neigh_entry *neigh_entry)
1934{
1935 if (!neigh_entry->counter_valid)
1936 return;
1937 mlxsw_sp_flow_counter_free(mlxsw_sp,
1938 neigh_entry->counter_index);
1939 neigh_entry->counter_valid = false;
1940}
1941
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001942static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001943mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001944{
1945 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001946 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001947 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001948
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001949 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1950 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001951 return ERR_PTR(-EINVAL);
1952
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001953 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001954 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001955 return ERR_PTR(-ENOMEM);
1956
1957 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1958 if (err)
1959 goto err_neigh_entry_insert;
1960
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001961 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001962 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001963
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001964 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001965
1966err_neigh_entry_insert:
1967 mlxsw_sp_neigh_entry_free(neigh_entry);
1968 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001969}
1970
1971static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001972mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1973 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001974{
Ido Schimmel9665b742017-02-08 11:16:42 +01001975 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001976 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001977 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1978 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001979}
1980
1981static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001982mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001983{
Jiri Pirko33b13412016-11-10 12:31:04 +01001984 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001985
Jiri Pirko33b13412016-11-10 12:31:04 +01001986 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001987 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001988 &key, mlxsw_sp_neigh_ht_params);
1989}
1990
Yotam Gigic723c7352016-07-05 11:27:43 +02001991static void
1992mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1993{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001994 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001995
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001996#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001997 interval = min_t(unsigned long,
1998 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1999 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002000#else
2001 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2002#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02002003 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02002004}
2005
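/* Process one IPv4 entry of a RAUHTD dump: look up the matching kernel
 * neighbour and poke it so the kernel knows the hardware saw activity for it.
 */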
2006static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2007 char *rauhtd_pl,
2008 int ent_index)
2009{
2010 struct net_device *dev;
2011 struct neighbour *n;
2012 __be32 dipn;
2013 u32 dip;
2014 u16 rif;
2015
2016 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2017
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002018 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02002019 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2020 return;
2021 }
2022
2023 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002024 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02002025 n = neigh_lookup(&arp_tbl, &dipn, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002026 if (!n)
Yotam Gigic723c7352016-07-05 11:27:43 +02002027 return;
Yotam Gigic723c7352016-07-05 11:27:43 +02002028
2029 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2030 neigh_event_send(n, NULL);
2031 neigh_release(n);
2032}
2033
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02002034#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002035static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2036 char *rauhtd_pl,
2037 int rec_index)
2038{
2039 struct net_device *dev;
2040 struct neighbour *n;
2041 struct in6_addr dip;
2042 u16 rif;
2043
2044 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2045 (char *) &dip);
2046
2047 if (!mlxsw_sp->router->rifs[rif]) {
2048 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2049 return;
2050 }
2051
2052 dev = mlxsw_sp->router->rifs[rif]->dev;
2053 n = neigh_lookup(&nd_tbl, &dip, dev);
Yuval Mintz1ecdaea2018-01-24 10:02:09 +01002054 if (!n)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002055 return;
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002056
2057 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2058 neigh_event_send(n, NULL);
2059 neigh_release(n);
2060}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002061#else
2062static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2063 char *rauhtd_pl,
2064 int rec_index)
2065{
2066}
2067#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002068
Yotam Gigic723c7352016-07-05 11:27:43 +02002069static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2070 char *rauhtd_pl,
2071 int rec_index)
2072{
2073 u8 num_entries;
2074 int i;
2075
2076 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2077 rec_index);
2078 /* Hardware starts counting at 0, so add 1. */
2079 num_entries++;
2080
2081 /* Each record consists of several neighbour entries. */
2082 for (i = 0; i < num_entries; i++) {
2083 int ent_index;
2084
2085 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2086 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2087 ent_index);
2088 }
2089
2090}
2091
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002092static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2093 char *rauhtd_pl,
2094 int rec_index)
2095{
2096 /* One record contains one entry. */
2097 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2098 rec_index);
2099}
2100
Yotam Gigic723c7352016-07-05 11:27:43 +02002101static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2102 char *rauhtd_pl, int rec_index)
2103{
2104 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2105 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2106 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2107 rec_index);
2108 break;
2109 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002110 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2111 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002112 break;
2113 }
2114}
2115
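/* A dump is considered full when the maximum number of records was returned
 * and the last record is either an IPv6 record or a completely filled IPv4
 * record, in which case another query is needed.
 */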
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002116static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2117{
2118 u8 num_rec, last_rec_index, num_entries;
2119
2120 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2121 last_rec_index = num_rec - 1;
2122
2123 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2124 return false;
2125 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2126 MLXSW_REG_RAUHTD_TYPE_IPV6)
2127 return true;
2128
2129 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2130 last_rec_index);
2131 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2132 return true;
2133 return false;
2134}
2135
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002136static int
2137__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2138 char *rauhtd_pl,
2139 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002140{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002141 int i, num_rec;
2142 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002143
2144 /* Make sure the neighbour's netdev isn't removed in the
2145 * process.
2146 */
2147 rtnl_lock();
2148 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002149 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002150 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2151 rauhtd_pl);
2152 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002153 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002154 break;
2155 }
2156 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2157 for (i = 0; i < num_rec; i++)
2158 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2159 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002160 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002161 rtnl_unlock();
2162
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002163 return err;
2164}
2165
2166static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2167{
2168 enum mlxsw_reg_rauhtd_type type;
2169 char *rauhtd_pl;
2170 int err;
2171
2172 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2173 if (!rauhtd_pl)
2174 return -ENOMEM;
2175
2176 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2177 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2178 if (err)
2179 goto out;
2180
2181 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2182 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2183out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002184 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002185 return err;
2186}
2187
2188static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2189{
2190 struct mlxsw_sp_neigh_entry *neigh_entry;
2191
2192	/* Take the RTNL mutex here to prevent the lists from changing */
2193 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002194 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002195 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002196		/* If this neigh has nexthops, make the kernel think it is
2197		 * active regardless of the traffic.
2198		 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002199 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002200 rtnl_unlock();
2201}
2202
2203static void
2204mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2205{
Ido Schimmel9011b672017-05-16 19:38:25 +02002206 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002207
Ido Schimmel9011b672017-05-16 19:38:25 +02002208 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002209 msecs_to_jiffies(interval));
2210}
2211
2212static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2213{
Ido Schimmel9011b672017-05-16 19:38:25 +02002214 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002215 int err;
2216
Ido Schimmel9011b672017-05-16 19:38:25 +02002217 router = container_of(work, struct mlxsw_sp_router,
2218 neighs_update.dw.work);
2219 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002220 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002221 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002222
Ido Schimmel9011b672017-05-16 19:38:25 +02002223 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002224
Ido Schimmel9011b672017-05-16 19:38:25 +02002225 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002226}
2227
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002228static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2229{
2230 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002231 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002232
Ido Schimmel9011b672017-05-16 19:38:25 +02002233 router = container_of(work, struct mlxsw_sp_router,
2234 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002235	/* Iterate over the nexthop neighbours, find those that are unresolved and
2236	 * send ARP to them. This solves a chicken-and-egg problem: a nexthop is
2237	 * not offloaded until its neighbour is resolved, but the neighbour may
2238	 * never get resolved if traffic keeps flowing in HW via a different
2239	 * nexthop.
2240	 *
2241	 * Take the RTNL mutex here to prevent the lists from changing.
2242	 */
2243 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002244 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002245 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002246 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002247 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002248 rtnl_unlock();
2249
Ido Schimmel9011b672017-05-16 19:38:25 +02002250 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002251 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2252}
2253
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002254static void
2255mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2256 struct mlxsw_sp_neigh_entry *neigh_entry,
2257 bool removing);
2258
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002259static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002260{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002261 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2262 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2263}
2264
2265static void
2266mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2267 struct mlxsw_sp_neigh_entry *neigh_entry,
2268 enum mlxsw_reg_rauht_op op)
2269{
Jiri Pirko33b13412016-11-10 12:31:04 +01002270 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002271 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002272 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002273
2274 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2275 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002276 if (neigh_entry->counter_valid)
2277 mlxsw_reg_rauht_pack_counter(rauht_pl,
2278 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002279 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2280}
2281
2282static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002283mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2284 struct mlxsw_sp_neigh_entry *neigh_entry,
2285 enum mlxsw_reg_rauht_op op)
2286{
2287 struct neighbour *n = neigh_entry->key.n;
2288 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2289 const char *dip = n->primary_key;
2290
2291 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2292 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002293 if (neigh_entry->counter_valid)
2294 mlxsw_reg_rauht_pack_counter(rauht_pl,
2295 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002296 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2297}
2298
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002299bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002300{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002301 struct neighbour *n = neigh_entry->key.n;
2302
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002303 /* Packets with a link-local destination address are trapped
2304 * after LPM lookup and never reach the neighbour table, so
2305 * there is no need to program such neighbours to the device.
2306 */
2307 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2308 IPV6_ADDR_LINKLOCAL)
2309 return true;
2310 return false;
2311}
2312
2313static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002314mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2315 struct mlxsw_sp_neigh_entry *neigh_entry,
2316 bool adding)
2317{
2318 if (!adding && !neigh_entry->connected)
2319 return;
2320 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002321 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002322 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2323 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002324 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002325 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002326 return;
2327 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2328 mlxsw_sp_rauht_op(adding));
2329 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002330 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002331 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002332}
2333
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002334void
2335mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2336 struct mlxsw_sp_neigh_entry *neigh_entry,
2337 bool adding)
2338{
2339 if (adding)
2340 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2341 else
2342 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2343 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2344}
2345
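/* Context for netevent handling that is deferred to process context; carries
 * the affected neighbour when the event concerns one.
 */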
Ido Schimmelceb88812017-11-02 17:14:07 +01002346struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002347 struct work_struct work;
2348 struct mlxsw_sp *mlxsw_sp;
2349 struct neighbour *n;
2350};
2351
2352static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2353{
Ido Schimmelceb88812017-11-02 17:14:07 +01002354 struct mlxsw_sp_netevent_work *net_work =
2355 container_of(work, struct mlxsw_sp_netevent_work, work);
2356 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002357 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002358 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002359 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002360 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002361 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002362
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002363 /* If these parameters are changed after we release the lock,
2364 * then we are guaranteed to receive another event letting us
2365 * know about it.
2366 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002367 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002368 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002369 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002370 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002371 read_unlock_bh(&n->lock);
2372
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002373 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002374 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002375 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2376 if (!entry_connected && !neigh_entry)
2377 goto out;
2378 if (!neigh_entry) {
2379 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2380 if (IS_ERR(neigh_entry))
2381 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002382 }
2383
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002384 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2385 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2386 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2387
2388 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2389 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2390
2391out:
2392 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002393 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002394 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002395}
2396
Ido Schimmel28678f02017-11-02 17:14:10 +01002397static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2398
2399static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2400{
2401 struct mlxsw_sp_netevent_work *net_work =
2402 container_of(work, struct mlxsw_sp_netevent_work, work);
2403 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2404
2405 mlxsw_sp_mp_hash_init(mlxsw_sp);
2406 kfree(net_work);
2407}
2408
2409static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002410 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002411{
Ido Schimmelceb88812017-11-02 17:14:07 +01002412 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002413 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002414 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002415 struct mlxsw_sp *mlxsw_sp;
2416 unsigned long interval;
2417 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002418 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002419 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002420
2421 switch (event) {
2422 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2423 p = ptr;
2424
2425 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002426 if (!p->dev || (p->tbl->family != AF_INET &&
2427 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002428 return NOTIFY_DONE;
2429
2430 /* We are in atomic context and can't take RTNL mutex,
2431 * so use RCU variant to walk the device chain.
2432 */
2433 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2434 if (!mlxsw_sp_port)
2435 return NOTIFY_DONE;
2436
2437 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2438 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002439 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002440
2441 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2442 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002443 case NETEVENT_NEIGH_UPDATE:
2444 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002445
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002446 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002447 return NOTIFY_DONE;
2448
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002449 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002450 if (!mlxsw_sp_port)
2451 return NOTIFY_DONE;
2452
Ido Schimmelceb88812017-11-02 17:14:07 +01002453 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2454 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002455 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002456 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002457 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002458
Ido Schimmelceb88812017-11-02 17:14:07 +01002459 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2460 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2461 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002462
2463		/* Take a reference to ensure the neighbour won't be
2464		 * destroyed until we drop the reference in the delayed
2465		 * work.
2466		 */
2467 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002468 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002469 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002470 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002471 case NETEVENT_MULTIPATH_HASH_UPDATE:
2472 net = ptr;
2473
2474 if (!net_eq(net, &init_net))
2475 return NOTIFY_DONE;
2476
2477 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2478 if (!net_work)
2479 return NOTIFY_BAD;
2480
2481 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2482 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2483 net_work->mlxsw_sp = router->mlxsw_sp;
2484 mlxsw_core_schedule_work(&net_work->work);
2485 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002486 }
2487
2488 return NOTIFY_DONE;
2489}
2490
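/* Set up neighbour tracking: the neighbour hash table, the periodic activity
 * dump and the unresolved nexthop probing work.
 */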
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002491static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2492{
Yotam Gigic723c7352016-07-05 11:27:43 +02002493 int err;
2494
Ido Schimmel9011b672017-05-16 19:38:25 +02002495 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002496 &mlxsw_sp_neigh_ht_params);
2497 if (err)
2498 return err;
2499
2500 /* Initialize the polling interval according to the default
2501 * table.
2502 */
2503 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2504
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002505	/* Create the delayed works for neighbour activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002506 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002507 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002508 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002509 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002510 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2511 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002512 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002513}
2514
2515static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2516{
Ido Schimmel9011b672017-05-16 19:38:25 +02002517 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2518 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2519 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002520}
2521
Ido Schimmel9665b742017-02-08 11:16:42 +01002522static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002523 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002524{
2525 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2526
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002527 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002528 rif_list_node) {
2529 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002530 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002531 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002532}
2533
Petr Machata35225e42017-09-02 23:49:22 +02002534enum mlxsw_sp_nexthop_type {
2535 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002536 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002537};
2538
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002539struct mlxsw_sp_nexthop_key {
2540 struct fib_nh *fib_nh;
2541};
2542
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002543struct mlxsw_sp_nexthop {
2544 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002545 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002546 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002547 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2548 * this belongs to
2549 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002550 struct rhash_head ht_node;
2551 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002552 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002553 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002554 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002555 int norm_nh_weight;
2556 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002557 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002558 u8 should_offload:1, /* set indicates this neigh is connected and
2559 * should be put to KVD linear area of this group.
2560 */
2561 offloaded:1, /* set in case the neigh is actually put into
2562 * KVD linear area of this group.
2563 */
2564 update:1; /* set indicates that MAC of this neigh should be
2565 * updated in HW
2566 */
Petr Machata35225e42017-09-02 23:49:22 +02002567 enum mlxsw_sp_nexthop_type type;
2568 union {
2569 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002570 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002571 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002572 unsigned int counter_index;
2573 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002574};
2575
2576struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002577 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002578 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002579 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002580 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002581 u8 adj_index_valid:1,
2582 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002583 u32 adj_index;
2584 u16 ecmp_size;
2585 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002586 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002587 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002588#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002589};
2590
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002591void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2592 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002593{
2594 struct devlink *devlink;
2595
2596 devlink = priv_to_devlink(mlxsw_sp->core);
2597 if (!devlink_dpipe_table_counter_enabled(devlink,
2598 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2599 return;
2600
2601 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2602 return;
2603
2604 nh->counter_valid = true;
2605}
2606
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002607void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2608 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002609{
2610 if (!nh->counter_valid)
2611 return;
2612 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2613 nh->counter_valid = false;
2614}
2615
2616int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2617 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2618{
2619 if (!nh->counter_valid)
2620 return -EINVAL;
2621
2622 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2623 p_counter, NULL);
2624}
2625
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002626struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2627 struct mlxsw_sp_nexthop *nh)
2628{
2629 if (!nh) {
2630 if (list_empty(&router->nexthop_list))
2631 return NULL;
2632 else
2633 return list_first_entry(&router->nexthop_list,
2634 typeof(*nh), router_list_node);
2635 }
2636 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2637 return NULL;
2638 return list_next_entry(nh, router_list_node);
2639}
2640
2641bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2642{
2643 return nh->offloaded;
2644}
2645
2646unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2647{
2648 if (!nh->offloaded)
2649 return NULL;
2650 return nh->neigh_entry->ha;
2651}
2652
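/* Report where this nexthop sits in the adjacency table: the base adjacency
 * index and size of its group and the offset of the nexthop within the group.
 */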
2653int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002654 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002655{
2656 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2657 u32 adj_hash_index = 0;
2658 int i;
2659
2660 if (!nh->offloaded || !nh_grp->adj_index_valid)
2661 return -EINVAL;
2662
2663 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002664 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002665
2666 for (i = 0; i < nh_grp->count; i++) {
2667 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2668
2669 if (nh_iter == nh)
2670 break;
2671 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002672 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002673 }
2674
2675 *p_adj_hash_index = adj_hash_index;
2676 return 0;
2677}
2678
2679struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2680{
2681 return nh->rif;
2682}
2683
2684bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2685{
2686 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2687 int i;
2688
2689 for (i = 0; i < nh_grp->count; i++) {
2690 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2691
2692 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2693 return true;
2694 }
2695 return false;
2696}
2697
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002698static struct fib_info *
2699mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2700{
2701 return nh_grp->priv;
2702}
2703
2704struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002705 enum mlxsw_sp_l3proto proto;
2706 union {
2707 struct fib_info *fi;
2708 struct mlxsw_sp_fib6_entry *fib6_entry;
2709 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002710};
2711
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002712static bool
2713mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002714 const struct in6_addr *gw, int ifindex,
2715 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002716{
2717 int i;
2718
2719 for (i = 0; i < nh_grp->count; i++) {
2720 const struct mlxsw_sp_nexthop *nh;
2721
2722 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002723 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002724 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2725 return true;
2726 }
2727
2728 return false;
2729}
2730
2731static bool
2732mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2733 const struct mlxsw_sp_fib6_entry *fib6_entry)
2734{
2735 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2736
2737 if (nh_grp->count != fib6_entry->nrt6)
2738 return false;
2739
2740 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2741 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002742 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002743
2744 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
Ido Schimmel3743d882018-01-12 17:15:59 +01002745 weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002746 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
Ido Schimmel3743d882018-01-12 17:15:59 +01002747 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2748 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002749 return false;
2750 }
2751
2752 return true;
2753}
2754
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002755static int
2756mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2757{
2758 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2759 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2760
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002761 switch (cmp_arg->proto) {
2762 case MLXSW_SP_L3_PROTO_IPV4:
2763 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2764 case MLXSW_SP_L3_PROTO_IPV6:
2765 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2766 cmp_arg->fib6_entry);
2767 default:
2768 WARN_ON(1);
2769 return 1;
2770 }
2771}
2772
2773static int
2774mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2775{
2776 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002777}
2778
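/* Hash functions for the nexthop group rhashtable. IPv4 groups are hashed
 * by the fib_info pointer itself, IPv6 groups by the nexthop count XORed
 * with the ifindex of every nexthop. mlxsw_sp_nexthop_group_hash_obj()
 * hashes stored groups, while mlxsw_sp_nexthop_group_hash() below hashes
 * lookup keys; the two must agree for matching objects, otherwise lookups
 * would miss existing groups.
 */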
2779static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2780{
2781 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002782 const struct mlxsw_sp_nexthop *nh;
2783 struct fib_info *fi;
2784 unsigned int val;
2785 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002786
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002787 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2788 case AF_INET:
2789 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2790 return jhash(&fi, sizeof(fi), seed);
2791 case AF_INET6:
2792 val = nh_grp->count;
2793 for (i = 0; i < nh_grp->count; i++) {
2794 nh = &nh_grp->nexthops[i];
2795 val ^= nh->ifindex;
2796 }
2797 return jhash(&val, sizeof(val), seed);
2798 default:
2799 WARN_ON(1);
2800 return 0;
2801 }
2802}
2803
2804static u32
2805mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2806{
2807 unsigned int val = fib6_entry->nrt6;
2808 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2809 struct net_device *dev;
2810
2811 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2812 dev = mlxsw_sp_rt6->rt->dst.dev;
2813 val ^= dev->ifindex;
2814 }
2815
2816 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002817}
2818
2819static u32
2820mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2821{
2822 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2823
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002824 switch (cmp_arg->proto) {
2825 case MLXSW_SP_L3_PROTO_IPV4:
2826 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2827 case MLXSW_SP_L3_PROTO_IPV6:
2828 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2829 default:
2830 WARN_ON(1);
2831 return 0;
2832 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002833}
2834
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002835static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002836 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002837 .hashfn = mlxsw_sp_nexthop_group_hash,
2838 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2839 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002840};
2841
2842static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2843 struct mlxsw_sp_nexthop_group *nh_grp)
2844{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002845 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2846 !nh_grp->gateway)
2847 return 0;
2848
Ido Schimmel9011b672017-05-16 19:38:25 +02002849 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002850 &nh_grp->ht_node,
2851 mlxsw_sp_nexthop_group_ht_params);
2852}
2853
2854static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2855 struct mlxsw_sp_nexthop_group *nh_grp)
2856{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002857 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2858 !nh_grp->gateway)
2859 return;
2860
Ido Schimmel9011b672017-05-16 19:38:25 +02002861 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002862 &nh_grp->ht_node,
2863 mlxsw_sp_nexthop_group_ht_params);
2864}
2865
2866static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002867mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2868 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002869{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002870 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2871
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002872 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002873 cmp_arg.fi = fi;
2874 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2875 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002876 mlxsw_sp_nexthop_group_ht_params);
2877}
2878
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002879static struct mlxsw_sp_nexthop_group *
2880mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2881 struct mlxsw_sp_fib6_entry *fib6_entry)
2882{
2883 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2884
2885 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2886 cmp_arg.fib6_entry = fib6_entry;
2887 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2888 &cmp_arg,
2889 mlxsw_sp_nexthop_group_ht_params);
2890}
2891
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002892static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2893 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2894 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2895 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2896};
2897
2898static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2899 struct mlxsw_sp_nexthop *nh)
2900{
Ido Schimmel9011b672017-05-16 19:38:25 +02002901 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002902 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2903}
2904
2905static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2906 struct mlxsw_sp_nexthop *nh)
2907{
Ido Schimmel9011b672017-05-16 19:38:25 +02002908 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002909 mlxsw_sp_nexthop_ht_params);
2910}
2911
Ido Schimmelad178c82017-02-08 11:16:40 +01002912static struct mlxsw_sp_nexthop *
2913mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2914 struct mlxsw_sp_nexthop_key key)
2915{
Ido Schimmel9011b672017-05-16 19:38:25 +02002916 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002917 mlxsw_sp_nexthop_ht_params);
2918}
2919
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002920static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002921 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002922 u32 adj_index, u16 ecmp_size,
2923 u32 new_adj_index,
2924 u16 new_ecmp_size)
2925{
2926 char raleu_pl[MLXSW_REG_RALEU_LEN];
2927
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002928 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002929 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2930 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002931 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002932 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2933}
2934
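/* When a group's adjacency range moves, re-point every route using the
 * group from the old range to the new one. The RALEU register rewrites all
 * matching routes within a virtual router at once, so the update is issued
 * per FIB/VR rather than per route.
 */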
2935static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2936 struct mlxsw_sp_nexthop_group *nh_grp,
2937 u32 old_adj_index, u16 old_ecmp_size)
2938{
2939 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002940 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002941 int err;
2942
2943 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002944 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002945 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002946 fib = fib_entry->fib_node->fib;
2947 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002948 old_adj_index,
2949 old_ecmp_size,
2950 nh_grp->adj_index,
2951 nh_grp->ecmp_size);
2952 if (err)
2953 return err;
2954 }
2955 return 0;
2956}
2957
Ido Schimmeleb789982017-10-22 23:11:48 +02002958static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2959 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002960{
2961 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2962 char ratr_pl[MLXSW_REG_RATR_LEN];
2963
2964 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002965 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2966 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002967 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002968 if (nh->counter_valid)
2969 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2970 else
2971 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2972
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002973 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2974}
2975
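/* Write the nexthop's Ethernet adjacency entry into each of the
 * num_adj_entries consecutive adjacency slots it occupies, starting at
 * adj_index.
 */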
Ido Schimmeleb789982017-10-22 23:11:48 +02002976int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2977 struct mlxsw_sp_nexthop *nh)
2978{
2979 int i;
2980
2981 for (i = 0; i < nh->num_adj_entries; i++) {
2982 int err;
2983
2984 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2985 if (err)
2986 return err;
2987 }
2988
2989 return 0;
2990}
2991
2992static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2993 u32 adj_index,
2994 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002995{
2996 const struct mlxsw_sp_ipip_ops *ipip_ops;
2997
2998 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2999 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3000}
3001
Ido Schimmeleb789982017-10-22 23:11:48 +02003002static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3003 u32 adj_index,
3004 struct mlxsw_sp_nexthop *nh)
3005{
3006 int i;
3007
3008 for (i = 0; i < nh->num_adj_entries; i++) {
3009 int err;
3010
3011 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3012 nh);
3013 if (err)
3014 return err;
3015 }
3016
3017 return 0;
3018}
3019
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003020static int
Petr Machata35225e42017-09-02 23:49:22 +02003021mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3022 struct mlxsw_sp_nexthop_group *nh_grp,
3023 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003024{
3025 u32 adj_index = nh_grp->adj_index; /* base */
3026 struct mlxsw_sp_nexthop *nh;
3027 int i;
3028 int err;
3029
3030 for (i = 0; i < nh_grp->count; i++) {
3031 nh = &nh_grp->nexthops[i];
3032
3033 if (!nh->should_offload) {
3034 nh->offloaded = 0;
3035 continue;
3036 }
3037
Ido Schimmela59b7e02017-01-23 11:11:42 +01003038 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02003039 switch (nh->type) {
3040 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003041 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003042 (mlxsw_sp, adj_index, nh);
3043 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003044 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3045 err = mlxsw_sp_nexthop_ipip_update
3046 (mlxsw_sp, adj_index, nh);
3047 break;
Petr Machata35225e42017-09-02 23:49:22 +02003048 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003049 if (err)
3050 return err;
3051 nh->update = 0;
3052 nh->offloaded = 1;
3053 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003054 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003055 }
3056 return 0;
3057}
3058
Ido Schimmel1819ae32017-07-21 18:04:28 +02003059static bool
3060mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3061 const struct mlxsw_sp_fib_entry *fib_entry);
3062
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003063static int
3064mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3065 struct mlxsw_sp_nexthop_group *nh_grp)
3066{
3067 struct mlxsw_sp_fib_entry *fib_entry;
3068 int err;
3069
3070 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02003071 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3072 fib_entry))
3073 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003074 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3075 if (err)
3076 return err;
3077 }
3078 return 0;
3079}
3080
3081static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003082mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3083 enum mlxsw_reg_ralue_op op, int err);
3084
3085static void
3086mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3087{
3088 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3089 struct mlxsw_sp_fib_entry *fib_entry;
3090
3091 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3092 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3093 fib_entry))
3094 continue;
3095 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3096 }
3097}
3098
Ido Schimmel425a08c2017-10-22 23:11:47 +02003099static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3100{
3101 /* Valid sizes for an adjacency group are:
3102 * 1-64, 512, 1024, 2048 and 4096.
3103 */
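	/* e.g. a request for 70 entries is rounded up to 512, while
	 * requests of 64 or less are left unchanged.
	 */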
3104 if (*p_adj_grp_size <= 64)
3105 return;
3106 else if (*p_adj_grp_size <= 512)
3107 *p_adj_grp_size = 512;
3108 else if (*p_adj_grp_size <= 1024)
3109 *p_adj_grp_size = 1024;
3110 else if (*p_adj_grp_size <= 2048)
3111 *p_adj_grp_size = 2048;
3112 else
3113 *p_adj_grp_size = 4096;
3114}
3115
3116static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3117 unsigned int alloc_size)
3118{
3119 if (alloc_size >= 4096)
3120 *p_adj_grp_size = 4096;
3121 else if (alloc_size >= 2048)
3122 *p_adj_grp_size = 2048;
3123 else if (alloc_size >= 1024)
3124 *p_adj_grp_size = 1024;
3125 else if (alloc_size >= 512)
3126 *p_adj_grp_size = 512;
3127}
3128
3129static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3130 u16 *p_adj_grp_size)
3131{
3132 unsigned int alloc_size;
3133 int err;
3134
3135 /* Round up the requested group size to the next size supported
3136 * by the device and make sure the request can be satisfied.
3137 */
3138 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3139 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3140 &alloc_size);
3141 if (err)
3142 return err;
3143 /* It is possible the allocation results in more allocated
 3144	 * entries than requested. Try to use as many of them as
3145 * possible.
3146 */
3147 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3148
3149 return 0;
3150}
3151
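/* Reduce the configured nexthop weights by their greatest common divisor,
 * considering only nexthops that should be offloaded, and record the sum of
 * the normalized weights. For example, weights 2 and 4 normalize to 1 and 2,
 * giving sum_norm_weight = 3; a sum of 0 means no nexthop in the group can
 * currently be offloaded.
 */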
Ido Schimmel77d964e2017-08-02 09:56:05 +02003152static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003153mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3154{
3155 int i, g = 0, sum_norm_weight = 0;
3156 struct mlxsw_sp_nexthop *nh;
3157
3158 for (i = 0; i < nh_grp->count; i++) {
3159 nh = &nh_grp->nexthops[i];
3160
3161 if (!nh->should_offload)
3162 continue;
3163 if (g > 0)
3164 g = gcd(nh->nh_weight, g);
3165 else
3166 g = nh->nh_weight;
3167 }
3168
3169 for (i = 0; i < nh_grp->count; i++) {
3170 nh = &nh_grp->nexthops[i];
3171
3172 if (!nh->should_offload)
3173 continue;
3174 nh->norm_nh_weight = nh->nh_weight / g;
3175 sum_norm_weight += nh->norm_nh_weight;
3176 }
3177
3178 nh_grp->sum_norm_weight = sum_norm_weight;
3179}
3180
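/* Distribute the group's ecmp_size adjacency entries across the offloaded
 * nexthops in proportion to their normalized weights, using cumulative
 * rounding so that the per-nexthop counts add up to exactly ecmp_size.
 * For example, with ecmp_size = 512 and normalized weights 1 and 2, the
 * nexthops are assigned 171 and 341 entries respectively.
 */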
3181static void
3182mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3183{
3184 int total = nh_grp->sum_norm_weight;
3185 u16 ecmp_size = nh_grp->ecmp_size;
3186 int i, weight = 0, lower_bound = 0;
3187
3188 for (i = 0; i < nh_grp->count; i++) {
3189 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3190 int upper_bound;
3191
3192 if (!nh->should_offload)
3193 continue;
3194 weight += nh->norm_nh_weight;
3195 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3196 nh->num_adj_entries = upper_bound - lower_bound;
3197 lower_bound = upper_bound;
3198 }
3199}
3200
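/* Re-sync a nexthop group with the device. Groups without a gateway only
 * need their FIB entries refreshed. If the set of offloadable nexthops is
 * unchanged, the existing adjacency entries are rewritten in place.
 * Otherwise the weights are re-normalized, an adjacency range of a supported
 * size is allocated in KVD linear memory and programmed, and the routes are
 * re-pointed at it before the old range is freed. On any failure the routes
 * are trapped to the kernel instead.
 */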
3201static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003202mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3203 struct mlxsw_sp_nexthop_group *nh_grp)
3204{
Ido Schimmeleb789982017-10-22 23:11:48 +02003205 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003206 struct mlxsw_sp_nexthop *nh;
3207 bool offload_change = false;
3208 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003209 bool old_adj_index_valid;
3210 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003211 int i;
3212 int err;
3213
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003214 if (!nh_grp->gateway) {
3215 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3216 return;
3217 }
3218
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003219 for (i = 0; i < nh_grp->count; i++) {
3220 nh = &nh_grp->nexthops[i];
3221
Petr Machata56b8a9e2017-07-31 09:27:29 +02003222 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003223 offload_change = true;
3224 if (nh->should_offload)
3225 nh->update = 1;
3226 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003227 }
3228 if (!offload_change) {
3229 /* Nothing was added or removed, so no need to reallocate. Just
3230 * update MAC on existing adjacency indexes.
3231 */
Petr Machata35225e42017-09-02 23:49:22 +02003232 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003233 if (err) {
3234 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3235 goto set_trap;
3236 }
3237 return;
3238 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003239 mlxsw_sp_nexthop_group_normalize(nh_grp);
3240 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003241 /* No neigh of this group is connected so we just set
 3242	 * the trap and let everything flow through the kernel.
3243 */
3244 goto set_trap;
3245
Ido Schimmeleb789982017-10-22 23:11:48 +02003246 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003247 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3248 if (err)
3249 /* No valid allocation size available. */
3250 goto set_trap;
3251
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003252 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3253 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003254 /* We ran out of KVD linear space, just set the
 3255	 * trap and let everything flow through the kernel.
3256 */
3257 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3258 goto set_trap;
3259 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003260 old_adj_index_valid = nh_grp->adj_index_valid;
3261 old_adj_index = nh_grp->adj_index;
3262 old_ecmp_size = nh_grp->ecmp_size;
3263 nh_grp->adj_index_valid = 1;
3264 nh_grp->adj_index = adj_index;
3265 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003266 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003267 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003268 if (err) {
3269 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3270 goto set_trap;
3271 }
3272
3273 if (!old_adj_index_valid) {
3274 /* The trap was set for fib entries, so we have to call
3275 * fib entry update to unset it and use adjacency index.
3276 */
3277 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3278 if (err) {
3279 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3280 goto set_trap;
3281 }
3282 return;
3283 }
3284
3285 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3286 old_adj_index, old_ecmp_size);
3287 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3288 if (err) {
3289 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3290 goto set_trap;
3291 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003292
3293 /* Offload state within the group changed, so update the flags. */
3294 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3295
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003296 return;
3297
3298set_trap:
3299 old_adj_index_valid = nh_grp->adj_index_valid;
3300 nh_grp->adj_index_valid = 0;
3301 for (i = 0; i < nh_grp->count; i++) {
3302 nh = &nh_grp->nexthops[i];
3303 nh->offloaded = 0;
3304 }
3305 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3306 if (err)
3307 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3308 if (old_adj_index_valid)
3309 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3310}
3311
3312static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3313 bool removing)
3314{
Petr Machata213666a2017-07-31 09:27:30 +02003315 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003316 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003317 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003318 nh->should_offload = 0;
3319 nh->update = 1;
3320}
3321
3322static void
3323mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3324 struct mlxsw_sp_neigh_entry *neigh_entry,
3325 bool removing)
3326{
3327 struct mlxsw_sp_nexthop *nh;
3328
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003329 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3330 neigh_list_node) {
3331 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3332 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3333 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003334}
3335
Ido Schimmel9665b742017-02-08 11:16:42 +01003336static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003337 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003338{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003339 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003340 return;
3341
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003342 nh->rif = rif;
3343 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003344}
3345
3346static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3347{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003348 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003349 return;
3350
3351 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003352 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003353}
3354
Ido Schimmela8c97012017-02-08 11:16:35 +01003355static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3356 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003357{
3358 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003360 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003361 int err;
3362
Ido Schimmelad178c82017-02-08 11:16:40 +01003363 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003364 return 0;
3365
Jiri Pirko33b13412016-11-10 12:31:04 +01003366	/* Take a reference on neigh here, ensuring that neigh would
Petr Machata8de3c172017-07-31 09:27:25 +02003367	 * not be destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003368 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003369 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003370 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003371 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003372 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003373 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3374 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003375 if (IS_ERR(n))
3376 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003377 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003378 }
3379 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3380 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003381 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3382 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003383 err = -EINVAL;
3384 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003385 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003386 }
Yotam Gigib2157142016-07-05 11:27:51 +02003387
3388 /* If that is the first nexthop connected to that neigh, add to
3389 * nexthop_neighs_list
3390 */
3391 if (list_empty(&neigh_entry->nexthop_list))
3392 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003393 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003394
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003395 nh->neigh_entry = neigh_entry;
3396 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3397 read_lock_bh(&n->lock);
3398 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003399 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003400 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003401 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003402
3403 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003404
3405err_neigh_entry_create:
3406 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003407 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003408}
3409
Ido Schimmela8c97012017-02-08 11:16:35 +01003410static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3411 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003412{
3413 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003414 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003415
Ido Schimmelb8399a12017-02-08 11:16:33 +01003416 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003417 return;
3418 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003419
Ido Schimmel58312122016-12-23 09:32:50 +01003420 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003421 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003422 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003423
3424 /* If that is the last nexthop connected to that neigh, remove from
3425 * nexthop_neighs_list
3426 */
Ido Schimmele58be792017-02-08 11:16:28 +01003427 if (list_empty(&neigh_entry->nexthop_list))
3428 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003429
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003430 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3431 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3432
3433 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003434}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003435
Petr Machata44b0fff2017-11-03 10:03:44 +01003436static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3437{
3438 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3439
3440 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3441}
3442
Petr Machatad97cda52017-11-28 13:17:13 +01003443static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3444 struct mlxsw_sp_nexthop *nh,
3445 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003446{
Petr Machata44b0fff2017-11-03 10:03:44 +01003447 bool removing;
3448
Petr Machata1012b9a2017-09-02 23:49:23 +02003449 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003450 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003451
Petr Machatad97cda52017-11-28 13:17:13 +01003452 nh->ipip_entry = ipip_entry;
3453 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003454 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003455 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003456}
3457
3458static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3459 struct mlxsw_sp_nexthop *nh)
3460{
3461 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3462
3463 if (!ipip_entry)
3464 return;
3465
3466 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003467 nh->ipip_entry = NULL;
3468}
3469
3470static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3471 const struct fib_nh *fib_nh,
3472 enum mlxsw_sp_ipip_type *p_ipipt)
3473{
3474 struct net_device *dev = fib_nh->nh_dev;
3475
3476 return dev &&
3477 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3478 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3479}
3480
Petr Machata35225e42017-09-02 23:49:22 +02003481static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3482 struct mlxsw_sp_nexthop *nh)
3483{
3484 switch (nh->type) {
3485 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3486 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3487 mlxsw_sp_nexthop_rif_fini(nh);
3488 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003489 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003490 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003491 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3492 break;
Petr Machata35225e42017-09-02 23:49:22 +02003493 }
3494}
3495
3496static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3497 struct mlxsw_sp_nexthop *nh,
3498 struct fib_nh *fib_nh)
3499{
Petr Machatad97cda52017-11-28 13:17:13 +01003500 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003501 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003502 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003503 struct mlxsw_sp_rif *rif;
3504 int err;
3505
Petr Machatad97cda52017-11-28 13:17:13 +01003506 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3507 if (ipip_entry) {
3508 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3509 if (ipip_ops->can_offload(mlxsw_sp, dev,
3510 MLXSW_SP_L3_PROTO_IPV4)) {
3511 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3512 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3513 return 0;
3514 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003515 }
3516
Petr Machata35225e42017-09-02 23:49:22 +02003517 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3518 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3519 if (!rif)
3520 return 0;
3521
3522 mlxsw_sp_nexthop_rif_init(nh, rif);
3523 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3524 if (err)
3525 goto err_neigh_init;
3526
3527 return 0;
3528
3529err_neigh_init:
3530 mlxsw_sp_nexthop_rif_fini(nh);
3531 return err;
3532}
3533
3534static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3535 struct mlxsw_sp_nexthop *nh)
3536{
3537 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3538}
3539
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003540static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3541 struct mlxsw_sp_nexthop_group *nh_grp,
3542 struct mlxsw_sp_nexthop *nh,
3543 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003544{
3545 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003546 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003547 int err;
3548
3549 nh->nh_grp = nh_grp;
3550 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003551#ifdef CONFIG_IP_ROUTE_MULTIPATH
3552 nh->nh_weight = fib_nh->nh_weight;
3553#else
3554 nh->nh_weight = 1;
3555#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003556 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003557 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3558 if (err)
3559 return err;
3560
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003561 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003562 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3563
Ido Schimmel97989ee2017-03-10 08:53:38 +01003564 if (!dev)
3565 return 0;
3566
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003567 in_dev = __in_dev_get_rtnl(dev);
3568 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3569 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3570 return 0;
3571
Petr Machata35225e42017-09-02 23:49:22 +02003572 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003573 if (err)
3574 goto err_nexthop_neigh_init;
3575
3576 return 0;
3577
3578err_nexthop_neigh_init:
3579 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3580 return err;
3581}
3582
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003583static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3584 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003585{
Petr Machata35225e42017-09-02 23:49:22 +02003586 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003587 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003588 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003589 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003590}
3591
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003592static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3593 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003594{
3595 struct mlxsw_sp_nexthop_key key;
3596 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003597
Ido Schimmel9011b672017-05-16 19:38:25 +02003598 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003599 return;
3600
3601 key.fib_nh = fib_nh;
3602 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3603 if (WARN_ON_ONCE(!nh))
3604 return;
3605
Ido Schimmelad178c82017-02-08 11:16:40 +01003606 switch (event) {
3607 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003608 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003609 break;
3610 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003611 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003612 break;
3613 }
3614
3615 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3616}
3617
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003618static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3619 struct mlxsw_sp_rif *rif)
3620{
3621 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003622 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003623
3624 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003625 switch (nh->type) {
3626 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3627 removing = false;
3628 break;
3629 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3630 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3631 break;
3632 default:
3633 WARN_ON(1);
3634 continue;
3635 }
3636
3637 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003638 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3639 }
3640}
3641
Petr Machata09dbf622017-11-28 13:17:14 +01003642static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3643 struct mlxsw_sp_rif *old_rif,
3644 struct mlxsw_sp_rif *new_rif)
3645{
3646 struct mlxsw_sp_nexthop *nh;
3647
3648 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3649 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3650 nh->rif = new_rif;
3651 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3652}
3653
Ido Schimmel9665b742017-02-08 11:16:42 +01003654static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003655 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003656{
3657 struct mlxsw_sp_nexthop *nh, *tmp;
3658
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003659 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003660 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003661 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3662 }
3663}
3664
Petr Machata9b014512017-09-02 23:49:20 +02003665static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3666 const struct fib_info *fi)
3667{
Petr Machata1012b9a2017-09-02 23:49:23 +02003668 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3669 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003670}
3671
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003672static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003673mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003674{
3675 struct mlxsw_sp_nexthop_group *nh_grp;
3676 struct mlxsw_sp_nexthop *nh;
3677 struct fib_nh *fib_nh;
3678 size_t alloc_size;
3679 int i;
3680 int err;
3681
3682 alloc_size = sizeof(*nh_grp) +
3683 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3684 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3685 if (!nh_grp)
3686 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003687 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003688 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003689 nh_grp->neigh_tbl = &arp_tbl;
3690
Petr Machata9b014512017-09-02 23:49:20 +02003691 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003692 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003693 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003694 for (i = 0; i < nh_grp->count; i++) {
3695 nh = &nh_grp->nexthops[i];
3696 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003697 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003698 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003699 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003700 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003701 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3702 if (err)
3703 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003704 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3705 return nh_grp;
3706
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003707err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003708err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003709 for (i--; i >= 0; i--) {
3710 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003711 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003712 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003713 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003714 kfree(nh_grp);
3715 return ERR_PTR(err);
3716}
3717
3718static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003719mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3720 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003721{
3722 struct mlxsw_sp_nexthop *nh;
3723 int i;
3724
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003725 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003726 for (i = 0; i < nh_grp->count; i++) {
3727 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003728 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003729 }
Ido Schimmel58312122016-12-23 09:32:50 +01003730 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3731 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003732 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003733 kfree(nh_grp);
3734}
3735
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003736static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3737 struct mlxsw_sp_fib_entry *fib_entry,
3738 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003739{
3740 struct mlxsw_sp_nexthop_group *nh_grp;
3741
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003742 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003743 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003744 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003745 if (IS_ERR(nh_grp))
3746 return PTR_ERR(nh_grp);
3747 }
3748 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3749 fib_entry->nh_group = nh_grp;
3750 return 0;
3751}
3752
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003753static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3754 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003755{
3756 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3757
3758 list_del(&fib_entry->nexthop_group_node);
3759 if (!list_empty(&nh_grp->fib_list))
3760 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003761 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003762}
3763
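/* Only IPv4 routes with the default TOS (0) are considered for offload;
 * entries with a non-zero TOS are programmed to trap traffic to the kernel.
 */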
Ido Schimmel013b20f2017-02-08 11:16:36 +01003764static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003765mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3766{
3767 struct mlxsw_sp_fib4_entry *fib4_entry;
3768
3769 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3770 common);
3771 return !fib4_entry->tos;
3772}
3773
3774static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003775mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3776{
3777 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3778
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003779 switch (fib_entry->fib_node->fib->proto) {
3780 case MLXSW_SP_L3_PROTO_IPV4:
3781 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3782 return false;
3783 break;
3784 case MLXSW_SP_L3_PROTO_IPV6:
3785 break;
3786 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003787
Ido Schimmel013b20f2017-02-08 11:16:36 +01003788 switch (fib_entry->type) {
3789 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3790 return !!nh_group->adj_index_valid;
3791 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003792 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003793 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3794 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003795 default:
3796 return false;
3797 }
3798}
3799
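/* Find the group nexthop backing a given IPv6 route: the nexthop whose RIF
 * netdevice and gateway address match the route. Returns NULL if the route
 * is not represented in the group.
 */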
Ido Schimmel428b8512017-08-03 13:28:28 +02003800static struct mlxsw_sp_nexthop *
3801mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3802 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3803{
3804 int i;
3805
3806 for (i = 0; i < nh_grp->count; i++) {
3807 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3808 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3809
3810 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3811 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3812 &rt->rt6i_gateway))
3813 return nh;
3814 continue;
3815 }
3816
3817 return NULL;
3818}
3819
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003820static void
3821mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3822{
3823 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3824 int i;
3825
Petr Machata4607f6d2017-09-02 23:49:25 +02003826 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3827 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003828 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3829 return;
3830 }
3831
3832 for (i = 0; i < nh_grp->count; i++) {
3833 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3834
3835 if (nh->offloaded)
3836 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3837 else
3838 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3839 }
3840}
3841
3842static void
3843mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3844{
3845 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3846 int i;
3847
Ido Schimmeld1c95af2018-02-17 00:30:44 +01003848 if (!list_is_singular(&nh_grp->fib_list))
3849 return;
3850
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003851 for (i = 0; i < nh_grp->count; i++) {
3852 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3853
3854 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3855 }
3856}
3857
Ido Schimmel428b8512017-08-03 13:28:28 +02003858static void
3859mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3860{
3861 struct mlxsw_sp_fib6_entry *fib6_entry;
3862 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3863
3864 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3865 common);
3866
3867 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3868 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003869 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003870 return;
3871 }
3872
3873 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3874 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3875 struct mlxsw_sp_nexthop *nh;
3876
3877 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3878 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003879 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003880 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003881 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003882 }
3883}
3884
3885static void
3886mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3887{
3888 struct mlxsw_sp_fib6_entry *fib6_entry;
3889 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3890
3891 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3892 common);
3893 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3894 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3895
Ido Schimmelfe400792017-08-15 09:09:49 +02003896 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003897 }
3898}
3899
Ido Schimmel013b20f2017-02-08 11:16:36 +01003900static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3901{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003902 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003903 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003904 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003905 break;
3906 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003907 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3908 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003909 }
3910}
3911
3912static void
3913mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3914{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003915 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003916 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003917 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003918 break;
3919 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003920 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3921 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003922 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003923}
3924
3925static void
3926mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3927 enum mlxsw_reg_ralue_op op, int err)
3928{
3929 switch (op) {
3930 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003931 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3932 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3933 if (err)
3934 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003935 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003936 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003937 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003938 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3939 return;
3940 default:
3941 return;
3942 }
3943}
3944
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003945static void
3946mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3947 const struct mlxsw_sp_fib_entry *fib_entry,
3948 enum mlxsw_reg_ralue_op op)
3949{
3950 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3951 enum mlxsw_reg_ralxx_protocol proto;
3952 u32 *p_dip;
3953
3954 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3955
3956 switch (fib->proto) {
3957 case MLXSW_SP_L3_PROTO_IPV4:
3958 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3959 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3960 fib_entry->fib_node->key.prefix_len,
3961 *p_dip);
3962 break;
3963 case MLXSW_SP_L3_PROTO_IPV6:
3964 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3965 fib_entry->fib_node->key.prefix_len,
3966 fib_entry->fib_node->key.addr);
3967 break;
3968 }
3969}
3970
3971static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3972 struct mlxsw_sp_fib_entry *fib_entry,
3973 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003974{
3975 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003976 enum mlxsw_reg_ralue_trap_action trap_action;
3977 u16 trap_id = 0;
3978 u32 adjacency_index = 0;
3979 u16 ecmp_size = 0;
3980
3981 /* In case the nexthop group adjacency index is valid, use it
 3982	 * with the provided ECMP size. Otherwise, set up a trap and pass
 3983	 * traffic to the kernel.
3984 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003985 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003986 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3987 adjacency_index = fib_entry->nh_group->adj_index;
3988 ecmp_size = fib_entry->nh_group->ecmp_size;
3989 } else {
3990 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3991 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3992 }
3993
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003994 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003995 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3996 adjacency_index, ecmp_size);
3997 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3998}
3999
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004000static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4001 struct mlxsw_sp_fib_entry *fib_entry,
4002 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004003{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004004 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004005 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004006 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01004007 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004008 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004009
4010 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4011 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004012 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01004013 } else {
4014 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4015 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4016 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004017
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004018 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01004019 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
4020 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004021 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4022}
4023
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004024static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4025 struct mlxsw_sp_fib_entry *fib_entry,
4026 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004027{
4028 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02004029
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004030 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004031 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
4032 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
4033}
4034
Petr Machata4607f6d2017-09-02 23:49:25 +02004035static int
4036mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4037 struct mlxsw_sp_fib_entry *fib_entry,
4038 enum mlxsw_reg_ralue_op op)
4039{
4040 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4041 const struct mlxsw_sp_ipip_ops *ipip_ops;
4042
4043 if (WARN_ON(!ipip_entry))
4044 return -EINVAL;
4045
4046 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4047 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4048 fib_entry->decap.tunnel_index);
4049}
4050
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004051static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4052 struct mlxsw_sp_fib_entry *fib_entry,
4053 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004054{
4055 switch (fib_entry->type) {
4056 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004057 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004058 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004059 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004060 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004061 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02004062 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4063 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
4064 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004065 }
4066 return -EINVAL;
4067}
4068
4069static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4070 struct mlxsw_sp_fib_entry *fib_entry,
4071 enum mlxsw_reg_ralue_op op)
4072{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004073 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004074
Ido Schimmel013b20f2017-02-08 11:16:36 +01004075 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004076
Ido Schimmel013b20f2017-02-08 11:16:36 +01004077 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004078}
4079
4080static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4081 struct mlxsw_sp_fib_entry *fib_entry)
4082{
Jiri Pirko7146da32016-09-01 10:37:41 +02004083 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4084 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004085}
4086
4087static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4088 struct mlxsw_sp_fib_entry *fib_entry)
4089{
4090 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4091 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4092}
4093
Jiri Pirko61c503f2016-07-04 08:23:11 +02004094static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004095mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4096 const struct fib_entry_notifier_info *fen_info,
4097 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004098{
Petr Machata4607f6d2017-09-02 23:49:25 +02004099 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4100 struct net_device *dev = fen_info->fi->fib_dev;
4101 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004102 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004103
Ido Schimmel97989ee2017-03-10 08:53:38 +01004104 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004105 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004106 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4107 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004108 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004109 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4110 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4111 fib_entry,
4112 ipip_entry);
4113 }
4114 /* fall through */
4115 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004116 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4117 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004118 case RTN_UNREACHABLE: /* fall through */
4119 case RTN_BLACKHOLE: /* fall through */
4120 case RTN_PROHIBIT:
4121 /* Packets hitting these routes need to be trapped, but
 4122		 * can be trapped with a lower priority than packets directed
4123 * at the host, so use action type local instead of trap.
4124 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004125 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004126 return 0;
4127 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004128 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004129 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004130 else
4131 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004132 return 0;
4133 default:
4134 return -EINVAL;
4135 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004136}
4137
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004138static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004139mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4140 struct mlxsw_sp_fib_node *fib_node,
4141 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004142{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004143 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004144 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004145 int err;
4146
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004147 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4148 if (!fib4_entry)
4149 return ERR_PTR(-ENOMEM);
4150 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004151
4152 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4153 if (err)
4154 goto err_fib4_entry_type_set;
4155
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004156 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004157 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004158 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004159
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004160 fib4_entry->prio = fen_info->fi->fib_priority;
4161 fib4_entry->tb_id = fen_info->tb_id;
4162 fib4_entry->type = fen_info->type;
4163 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004164
4165 fib_entry->fib_node = fib_node;
4166
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004167 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004168
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004169err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004170err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004171 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004172 return ERR_PTR(err);
4173}
4174
4175static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004176 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004177{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004178 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004179 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004180}
4181
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004182static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004183mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4184 const struct fib_entry_notifier_info *fen_info)
4185{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004186 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004187 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004188 struct mlxsw_sp_fib *fib;
4189 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004190
Ido Schimmel160e22a2017-07-18 10:10:20 +02004191 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4192 if (!vr)
4193 return NULL;
4194 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4195
4196 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4197 sizeof(fen_info->dst),
4198 fen_info->dst_len);
4199 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004200 return NULL;
4201
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004202 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4203 if (fib4_entry->tb_id == fen_info->tb_id &&
4204 fib4_entry->tos == fen_info->tos &&
4205 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004206 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4207 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004208 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004209 }
4210 }
4211
4212 return NULL;
4213}
4214
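/* FIB nodes are kept in a per-FIB rhashtable, keyed by destination
 * address and prefix length (struct mlxsw_sp_fib_key).
 */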
4215static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4216 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4217 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4218 .key_len = sizeof(struct mlxsw_sp_fib_key),
4219 .automatic_shrinking = true,
4220};
4221
4222static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4223 struct mlxsw_sp_fib_node *fib_node)
4224{
4225 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4226 mlxsw_sp_fib_ht_params);
4227}
4228
4229static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4230 struct mlxsw_sp_fib_node *fib_node)
4231{
4232 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4233 mlxsw_sp_fib_ht_params);
4234}
4235
4236static struct mlxsw_sp_fib_node *
4237mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4238 size_t addr_len, unsigned char prefix_len)
4239{
4240 struct mlxsw_sp_fib_key key;
4241
4242 memset(&key, 0, sizeof(key));
4243 memcpy(key.addr, addr, addr_len);
4244 key.prefix_len = prefix_len;
4245 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4246}
4247
4248static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004249mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004250 size_t addr_len, unsigned char prefix_len)
4251{
4252 struct mlxsw_sp_fib_node *fib_node;
4253
4254 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4255 if (!fib_node)
4256 return NULL;
4257
4258 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004259 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004260 memcpy(fib_node->key.addr, addr, addr_len);
4261 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004262
4263 return fib_node;
4264}
4265
4266static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4267{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004268 list_del(&fib_node->list);
4269 WARN_ON(!list_empty(&fib_node->entry_list));
4270 kfree(fib_node);
4271}
4272
4273static bool
4274mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4275 const struct mlxsw_sp_fib_entry *fib_entry)
4276{
4277 return list_first_entry(&fib_node->entry_list,
4278 struct mlxsw_sp_fib_entry, list) == fib_entry;
4279}
4280
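/* Link a FIB node to the LPM tree bound to its FIB's protocol: if the
 * node's prefix length is already used by that tree, only bump the
 * per-prefix-length reference count. Otherwise, get a tree that also
 * covers the new prefix length and rebind the virtual routers to it.
 */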
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004281static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004282 struct mlxsw_sp_fib_node *fib_node)
4283{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004284 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004285 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004286 struct mlxsw_sp_lpm_tree *lpm_tree;
4287 int err;
4288
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004289 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4290 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4291 goto out;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004292
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004293 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4294 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004295 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4296 fib->proto);
4297 if (IS_ERR(lpm_tree))
4298 return PTR_ERR(lpm_tree);
4299
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004300 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4301 if (err)
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004302 goto err_lpm_tree_replace;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004303
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004304out:
4305 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004306 return 0;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004307
4308err_lpm_tree_replace:
4309 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4310 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004311}
4312
4313static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004314 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004315{
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004316 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4317 struct mlxsw_sp_prefix_usage req_prefix_usage;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004318 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004319 int err;
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004320
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004321 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004322 return;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004323 /* Try to construct a new LPM tree from the current prefix usage
4324 * minus the unused one. If we fail, continue using the old one.
Ido Schimmel4fd00312018-01-22 09:17:40 +01004325 */
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004326 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4327 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4328 fib_node->key.prefix_len);
4329 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4330 fib->proto);
4331 if (IS_ERR(lpm_tree))
4332 return;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004333
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004334 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4335 if (err)
4336 goto err_lpm_tree_replace;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004337
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004338 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004339
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004340err_lpm_tree_replace:
4341 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004342}
4343
Ido Schimmel76610eb2017-03-10 08:53:41 +01004344static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4345 struct mlxsw_sp_fib_node *fib_node,
4346 struct mlxsw_sp_fib *fib)
4347{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004348 int err;
4349
4350 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4351 if (err)
4352 return err;
4353 fib_node->fib = fib;
4354
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004355 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004356 if (err)
4357 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004358
Ido Schimmel76610eb2017-03-10 08:53:41 +01004359 return 0;
4360
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004361err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004362 fib_node->fib = NULL;
4363 mlxsw_sp_fib_node_remove(fib, fib_node);
4364 return err;
4365}
4366
4367static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4368 struct mlxsw_sp_fib_node *fib_node)
4369{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004370 struct mlxsw_sp_fib *fib = fib_node->fib;
4371
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004372 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004373 fib_node->fib = NULL;
4374 mlxsw_sp_fib_node_remove(fib, fib_node);
4375}
4376
Ido Schimmel9aecce12017-02-09 10:28:42 +01004377static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004378mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4379 size_t addr_len, unsigned char prefix_len,
4380 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004381{
4382 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004383 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004384 struct mlxsw_sp_vr *vr;
4385 int err;
4386
David Ahernf8fa9b42017-10-18 09:56:56 -07004387 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004388 if (IS_ERR(vr))
4389 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004390 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004391
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004392 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004393 if (fib_node)
4394 return fib_node;
4395
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004396 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004397 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004398 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004399 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004400 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004401
Ido Schimmel76610eb2017-03-10 08:53:41 +01004402 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4403 if (err)
4404 goto err_fib_node_init;
4405
Ido Schimmel9aecce12017-02-09 10:28:42 +01004406 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004407
Ido Schimmel76610eb2017-03-10 08:53:41 +01004408err_fib_node_init:
4409 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004410err_fib_node_create:
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004411 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004412 return ERR_PTR(err);
4413}
4414
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004415static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4416 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004417{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004418 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004419
Ido Schimmel9aecce12017-02-09 10:28:42 +01004420 if (!list_empty(&fib_node->entry_list))
4421 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004422 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004423 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01004424 mlxsw_sp_vr_put(mlxsw_sp, vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004425}
4426
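/* Entries of a FIB node are kept ordered by table ID, TOS and priority,
 * and only the first entry is reflected in the device. Find the first
 * existing entry before which the new one should be inserted so this
 * order is preserved. Illustrative example (not part of the driver):
 * given existing entries {tb_id 254, tos 0, prio 1} and
 * {tb_id 254, tos 0, prio 2}, a new {tb_id 254, tos 0, prio 1} entry
 * matches the first of them (prio >= new prio) and is therefore
 * inserted in front of it.
 */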
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004427static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004428mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004429 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004430{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004431 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004432
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004433 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4434 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004435 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004436 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004437 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004438 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004439 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004440 if (fib4_entry->prio >= new4_entry->prio ||
4441 fib4_entry->tos < new4_entry->tos)
4442 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004443 }
4444
4445 return NULL;
4446}
4447
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004448static int
4449mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4450 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004451{
4452 struct mlxsw_sp_fib_node *fib_node;
4453
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004454 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004455 return -EINVAL;
4456
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004457 fib_node = fib4_entry->common.fib_node;
4458 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4459 common.list) {
4460 if (fib4_entry->tb_id != new4_entry->tb_id ||
4461 fib4_entry->tos != new4_entry->tos ||
4462 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004463 break;
4464 }
4465
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004466 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004467 return 0;
4468}
4469
Ido Schimmel9aecce12017-02-09 10:28:42 +01004470static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004471mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004472 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004473{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004474 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004475 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004476
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004477 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004478
Ido Schimmel4283bce2017-02-09 10:28:43 +01004479 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004480 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4481 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004482 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004483
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004484	/* Insert the new entry before the replaced one, so that we can later
 4485	 * remove the latter.
4486 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004487 if (fib4_entry) {
4488 list_add_tail(&new4_entry->common.list,
4489 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004490 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004491 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004492
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004493 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4494 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004495 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004496 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004497 }
4498
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004499 if (fib4_entry)
4500 list_add(&new4_entry->common.list,
4501 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004502 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004503 list_add(&new4_entry->common.list,
4504 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004505 }
4506
4507 return 0;
4508}
4509
4510static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004511mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004512{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004513 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004514}
4515
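/* Only the first entry in a node's entry list is programmed to the
 * device. Adding a new first entry overwrites the previously offloaded
 * one and refreshes that entry's offload indication; adding a non-first
 * entry does not touch the hardware.
 */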
Ido Schimmel80c238f2017-07-18 10:10:29 +02004516static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4517 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004518{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004519 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4520
Ido Schimmel9aecce12017-02-09 10:28:42 +01004521 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4522 return 0;
4523
4524 /* To prevent packet loss, overwrite the previously offloaded
4525 * entry.
4526 */
4527 if (!list_is_singular(&fib_node->entry_list)) {
4528 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4529 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4530
4531 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4532 }
4533
4534 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4535}
4536
Ido Schimmel80c238f2017-07-18 10:10:29 +02004537static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4538 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004539{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004540 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4541
Ido Schimmel9aecce12017-02-09 10:28:42 +01004542 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4543 return;
4544
4545 /* Promote the next entry by overwriting the deleted entry */
4546 if (!list_is_singular(&fib_node->entry_list)) {
4547 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4548 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4549
4550 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4551 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4552 return;
4553 }
4554
4555 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4556}
4557
4558static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004559 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004560 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004561{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004562 int err;
4563
Ido Schimmel9efbee62017-07-18 10:10:28 +02004564 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004565 if (err)
4566 return err;
4567
Ido Schimmel80c238f2017-07-18 10:10:29 +02004568 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004569 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004570 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004571
Ido Schimmel9aecce12017-02-09 10:28:42 +01004572 return 0;
4573
Ido Schimmel80c238f2017-07-18 10:10:29 +02004574err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004575 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004576 return err;
4577}
4578
4579static void
4580mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004581 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004582{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004583 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004584 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004585
4586 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4587 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004588}
4589
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004590static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004591 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004592 bool replace)
4593{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004594 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4595 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004596
4597 if (!replace)
4598 return;
4599
 4600	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004601 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004602
4603 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4604 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004605 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004606}
4607
Ido Schimmel9aecce12017-02-09 10:28:42 +01004608static int
4609mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004610 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004611 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004612{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004613 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004614 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004615 int err;
4616
Ido Schimmel9011b672017-05-16 19:38:25 +02004617 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004618 return 0;
4619
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004620 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4621 &fen_info->dst, sizeof(fen_info->dst),
4622 fen_info->dst_len,
4623 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004624 if (IS_ERR(fib_node)) {
4625 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4626 return PTR_ERR(fib_node);
4627 }
4628
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004629 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4630 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004631 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004632 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004633 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004634 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004635
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004636 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004637 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004638 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004639 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4640 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004641 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004642
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004643 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004644
Jiri Pirko61c503f2016-07-04 08:23:11 +02004645 return 0;
4646
Ido Schimmel9aecce12017-02-09 10:28:42 +01004647err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004648 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004649err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004650 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004651 return err;
4652}
4653
Jiri Pirko37956d72016-10-20 16:05:43 +02004654static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4655 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004656{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004657 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004658 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004659
Ido Schimmel9011b672017-05-16 19:38:25 +02004660 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004661 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004662
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004663 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4664 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004665 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004666 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004667
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004668 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4669 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004670 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004671}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004672
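/* IPv6 routes are reflected using the same FIB node and entry
 * structures as IPv4, but each entry aggregates all the rt6_info
 * siblings of a multipath route in its rt6_list.
 */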
Ido Schimmel428b8512017-08-03 13:28:28 +02004673static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4674{
4675 /* Packets with link-local destination IP arriving to the router
4676 * are trapped to the CPU, so no need to program specific routes
4677 * for them.
4678 */
4679 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4680 return true;
4681
4682 /* Multicast routes aren't supported, so ignore them. Neighbour
4683 * Discovery packets are specifically trapped.
4684 */
4685 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4686 return true;
4687
4688 /* Cloned routes are irrelevant in the forwarding path. */
4689 if (rt->rt6i_flags & RTF_CACHE)
4690 return true;
4691
4692 return false;
4693}
4694
4695static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4696{
4697 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4698
4699 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4700 if (!mlxsw_sp_rt6)
4701 return ERR_PTR(-ENOMEM);
4702
 4703	/* In case of route replace, the replaced route is deleted with
 4704	 * no notification. Take a reference to prevent accessing freed
4705 * memory.
4706 */
4707 mlxsw_sp_rt6->rt = rt;
4708 rt6_hold(rt);
4709
4710 return mlxsw_sp_rt6;
4711}
4712
4713#if IS_ENABLED(CONFIG_IPV6)
4714static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4715{
4716 rt6_release(rt);
4717}
4718#else
4719static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4720{
4721}
4722#endif
4723
4724static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4725{
4726 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4727 kfree(mlxsw_sp_rt6);
4728}
4729
4730static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4731{
4732 /* RTF_CACHE routes are ignored */
4733 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4734}
4735
4736static struct rt6_info *
4737mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4738{
4739 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4740 list)->rt;
4741}
4742
4743static struct mlxsw_sp_fib6_entry *
4744mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004745 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004746{
4747 struct mlxsw_sp_fib6_entry *fib6_entry;
4748
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004749 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004750 return NULL;
4751
4752 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4753 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4754
4755 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4756 * virtual router.
4757 */
4758 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4759 continue;
4760 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4761 break;
4762 if (rt->rt6i_metric < nrt->rt6i_metric)
4763 continue;
4764 if (rt->rt6i_metric == nrt->rt6i_metric &&
4765 mlxsw_sp_fib6_rt_can_mp(rt))
4766 return fib6_entry;
4767 if (rt->rt6i_metric > nrt->rt6i_metric)
4768 break;
4769 }
4770
4771 return NULL;
4772}
4773
4774static struct mlxsw_sp_rt6 *
4775mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4776 const struct rt6_info *rt)
4777{
4778 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4779
4780 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4781 if (mlxsw_sp_rt6->rt == rt)
4782 return mlxsw_sp_rt6;
4783 }
4784
4785 return NULL;
4786}
4787
Petr Machata8f28a302017-09-02 23:49:24 +02004788static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4789 const struct rt6_info *rt,
4790 enum mlxsw_sp_ipip_type *ret)
4791{
4792 return rt->dst.dev &&
4793 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4794}
4795
Petr Machata35225e42017-09-02 23:49:22 +02004796static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4797 struct mlxsw_sp_nexthop_group *nh_grp,
4798 struct mlxsw_sp_nexthop *nh,
4799 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004800{
Petr Machatad97cda52017-11-28 13:17:13 +01004801 const struct mlxsw_sp_ipip_ops *ipip_ops;
4802 struct mlxsw_sp_ipip_entry *ipip_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004803 struct net_device *dev = rt->dst.dev;
4804 struct mlxsw_sp_rif *rif;
4805 int err;
4806
Petr Machatad97cda52017-11-28 13:17:13 +01004807 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4808 if (ipip_entry) {
4809 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4810 if (ipip_ops->can_offload(mlxsw_sp, dev,
4811 MLXSW_SP_L3_PROTO_IPV6)) {
4812 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4813 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4814 return 0;
4815 }
Petr Machata8f28a302017-09-02 23:49:24 +02004816 }
4817
Petr Machata35225e42017-09-02 23:49:22 +02004818 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004819 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4820 if (!rif)
4821 return 0;
4822 mlxsw_sp_nexthop_rif_init(nh, rif);
4823
4824 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4825 if (err)
4826 goto err_nexthop_neigh_init;
4827
4828 return 0;
4829
4830err_nexthop_neigh_init:
4831 mlxsw_sp_nexthop_rif_fini(nh);
4832 return err;
4833}
4834
Petr Machata35225e42017-09-02 23:49:22 +02004835static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4836 struct mlxsw_sp_nexthop *nh)
4837{
4838 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4839}
4840
4841static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4842 struct mlxsw_sp_nexthop_group *nh_grp,
4843 struct mlxsw_sp_nexthop *nh,
4844 const struct rt6_info *rt)
4845{
4846 struct net_device *dev = rt->dst.dev;
4847
4848 nh->nh_grp = nh_grp;
Ido Schimmel3743d882018-01-12 17:15:59 +01004849 nh->nh_weight = rt->rt6i_nh_weight;
Petr Machata35225e42017-09-02 23:49:22 +02004850 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004851 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004852
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004853 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4854
Petr Machata35225e42017-09-02 23:49:22 +02004855 if (!dev)
4856 return 0;
4857 nh->ifindex = dev->ifindex;
4858
4859 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4860}
4861
Ido Schimmel428b8512017-08-03 13:28:28 +02004862static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4863 struct mlxsw_sp_nexthop *nh)
4864{
Petr Machata35225e42017-09-02 23:49:22 +02004865 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004866 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004867 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004868}
4869
Petr Machataf6050ee2017-09-02 23:49:21 +02004870static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4871 const struct rt6_info *rt)
4872{
Petr Machata8f28a302017-09-02 23:49:24 +02004873 return rt->rt6i_flags & RTF_GATEWAY ||
4874 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004875}
4876
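/* Build a nexthop group from all the rt6_infos currently linked to the
 * FIB entry. The group is inserted into the nexthop group hashtable so
 * that identical IPv6 multipath routes can share a single adjacency
 * group in the device.
 */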
Ido Schimmel428b8512017-08-03 13:28:28 +02004877static struct mlxsw_sp_nexthop_group *
4878mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4879 struct mlxsw_sp_fib6_entry *fib6_entry)
4880{
4881 struct mlxsw_sp_nexthop_group *nh_grp;
4882 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4883 struct mlxsw_sp_nexthop *nh;
4884 size_t alloc_size;
4885 int i = 0;
4886 int err;
4887
4888 alloc_size = sizeof(*nh_grp) +
4889 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4890 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4891 if (!nh_grp)
4892 return ERR_PTR(-ENOMEM);
4893 INIT_LIST_HEAD(&nh_grp->fib_list);
4894#if IS_ENABLED(CONFIG_IPV6)
4895 nh_grp->neigh_tbl = &nd_tbl;
4896#endif
4897 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4898 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004899 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004900 nh_grp->count = fib6_entry->nrt6;
4901 for (i = 0; i < nh_grp->count; i++) {
4902 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4903
4904 nh = &nh_grp->nexthops[i];
4905 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4906 if (err)
4907 goto err_nexthop6_init;
4908 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4909 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004910
4911 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4912 if (err)
4913 goto err_nexthop_group_insert;
4914
Ido Schimmel428b8512017-08-03 13:28:28 +02004915 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4916 return nh_grp;
4917
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004918err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004919err_nexthop6_init:
4920 for (i--; i >= 0; i--) {
4921 nh = &nh_grp->nexthops[i];
4922 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4923 }
4924 kfree(nh_grp);
4925 return ERR_PTR(err);
4926}
4927
4928static void
4929mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4930 struct mlxsw_sp_nexthop_group *nh_grp)
4931{
4932 struct mlxsw_sp_nexthop *nh;
4933 int i = nh_grp->count;
4934
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004935 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004936 for (i--; i >= 0; i--) {
4937 nh = &nh_grp->nexthops[i];
4938 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4939 }
4940 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4941 WARN_ON(nh_grp->adj_index_valid);
4942 kfree(nh_grp);
4943}
4944
4945static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4946 struct mlxsw_sp_fib6_entry *fib6_entry)
4947{
4948 struct mlxsw_sp_nexthop_group *nh_grp;
4949
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004950 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4951 if (!nh_grp) {
4952 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4953 if (IS_ERR(nh_grp))
4954 return PTR_ERR(nh_grp);
4955 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004956
4957 list_add_tail(&fib6_entry->common.nexthop_group_node,
4958 &nh_grp->fib_list);
4959 fib6_entry->common.nh_group = nh_grp;
4960
4961 return 0;
4962}
4963
4964static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4965 struct mlxsw_sp_fib_entry *fib_entry)
4966{
4967 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4968
4969 list_del(&fib_entry->nexthop_group_node);
4970 if (!list_empty(&nh_grp->fib_list))
4971 return;
4972 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4973}
4974
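/* Replace the entry's nexthop group with one that reflects its current
 * set of rt6_infos. If the entry is offloaded, the new group is written
 * to the device before the old group is potentially destroyed.
 */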
4975static int
4976mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4977 struct mlxsw_sp_fib6_entry *fib6_entry)
4978{
4979 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4980 int err;
4981
4982 fib6_entry->common.nh_group = NULL;
4983 list_del(&fib6_entry->common.nexthop_group_node);
4984
4985 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4986 if (err)
4987 goto err_nexthop6_group_get;
4988
 4989	/* If this entry is offloaded, the adjacency index
4990 * currently associated with it in the device's table is that
4991 * of the old group. Start using the new one instead.
4992 */
4993 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4994 if (err)
4995 goto err_fib_node_entry_add;
4996
4997 if (list_empty(&old_nh_grp->fib_list))
4998 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4999
5000 return 0;
5001
5002err_fib_node_entry_add:
5003 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5004err_nexthop6_group_get:
5005 list_add_tail(&fib6_entry->common.nexthop_group_node,
5006 &old_nh_grp->fib_list);
5007 fib6_entry->common.nh_group = old_nh_grp;
5008 return err;
5009}
5010
5011static int
5012mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5013 struct mlxsw_sp_fib6_entry *fib6_entry,
5014 struct rt6_info *rt)
5015{
5016 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5017 int err;
5018
5019 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5020 if (IS_ERR(mlxsw_sp_rt6))
5021 return PTR_ERR(mlxsw_sp_rt6);
5022
5023 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5024 fib6_entry->nrt6++;
5025
5026 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5027 if (err)
5028 goto err_nexthop6_group_update;
5029
5030 return 0;
5031
5032err_nexthop6_group_update:
5033 fib6_entry->nrt6--;
5034 list_del(&mlxsw_sp_rt6->list);
5035 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5036 return err;
5037}
5038
5039static void
5040mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5041 struct mlxsw_sp_fib6_entry *fib6_entry,
5042 struct rt6_info *rt)
5043{
5044 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5045
5046 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
5047 if (WARN_ON(!mlxsw_sp_rt6))
5048 return;
5049
5050 fib6_entry->nrt6--;
5051 list_del(&mlxsw_sp_rt6->list);
5052 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
5053 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5054}
5055
Petr Machataf6050ee2017-09-02 23:49:21 +02005056static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5057 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02005058 const struct rt6_info *rt)
5059{
5060 /* Packets hitting RTF_REJECT routes need to be discarded by the
5061 * stack. We can rely on their destination device not having a
5062 * RIF (it's the loopback device) and can thus use action type
5063 * local, which will cause them to be trapped with a lower
5064 * priority than packets that need to be locally received.
5065 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02005066 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02005067 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5068 else if (rt->rt6i_flags & RTF_REJECT)
5069 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02005070 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02005071 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5072 else
5073 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5074}
5075
5076static void
5077mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5078{
5079 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5080
5081 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5082 list) {
5083 fib6_entry->nrt6--;
5084 list_del(&mlxsw_sp_rt6->list);
5085 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5086 }
5087}
5088
5089static struct mlxsw_sp_fib6_entry *
5090mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5091 struct mlxsw_sp_fib_node *fib_node,
5092 struct rt6_info *rt)
5093{
5094 struct mlxsw_sp_fib6_entry *fib6_entry;
5095 struct mlxsw_sp_fib_entry *fib_entry;
5096 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5097 int err;
5098
5099 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5100 if (!fib6_entry)
5101 return ERR_PTR(-ENOMEM);
5102 fib_entry = &fib6_entry->common;
5103
5104 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5105 if (IS_ERR(mlxsw_sp_rt6)) {
5106 err = PTR_ERR(mlxsw_sp_rt6);
5107 goto err_rt6_create;
5108 }
5109
Petr Machataf6050ee2017-09-02 23:49:21 +02005110 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005111
5112 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5113 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5114 fib6_entry->nrt6 = 1;
5115 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5116 if (err)
5117 goto err_nexthop6_group_get;
5118
5119 fib_entry->fib_node = fib_node;
5120
5121 return fib6_entry;
5122
5123err_nexthop6_group_get:
5124 list_del(&mlxsw_sp_rt6->list);
5125 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5126err_rt6_create:
5127 kfree(fib6_entry);
5128 return ERR_PTR(err);
5129}
5130
5131static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5132 struct mlxsw_sp_fib6_entry *fib6_entry)
5133{
5134 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5135 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5136 WARN_ON(fib6_entry->nrt6);
5137 kfree(fib6_entry);
5138}
5139
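/* Find the IPv6 entry before which the new entry should be inserted,
 * keeping the node's entry list ordered by table ID and metric. In the
 * replace case the entry to be replaced is preferred, so that the new
 * entry ends up directly in front of it.
 */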
5140static struct mlxsw_sp_fib6_entry *
5141mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005142 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005143{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005144 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005145
5146 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5147 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5148
5149 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
5150 continue;
5151 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
5152 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005153 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
5154 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5155 mlxsw_sp_fib6_rt_can_mp(nrt))
5156 return fib6_entry;
5157 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5158 fallback = fallback ?: fib6_entry;
5159 }
Ido Schimmel428b8512017-08-03 13:28:28 +02005160 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005161 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005162 }
5163
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005164 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02005165}
5166
5167static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005168mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5169 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005170{
5171 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5172 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5173 struct mlxsw_sp_fib6_entry *fib6_entry;
5174
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005175 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5176
5177 if (replace && WARN_ON(!fib6_entry))
5178 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005179
5180 if (fib6_entry) {
5181 list_add_tail(&new6_entry->common.list,
5182 &fib6_entry->common.list);
5183 } else {
5184 struct mlxsw_sp_fib6_entry *last;
5185
5186 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5187 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5188
5189 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5190 break;
5191 fib6_entry = last;
5192 }
5193
5194 if (fib6_entry)
5195 list_add(&new6_entry->common.list,
5196 &fib6_entry->common.list);
5197 else
5198 list_add(&new6_entry->common.list,
5199 &fib_node->entry_list);
5200 }
5201
5202 return 0;
5203}
5204
5205static void
5206mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5207{
5208 list_del(&fib6_entry->common.list);
5209}
5210
5211static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005212 struct mlxsw_sp_fib6_entry *fib6_entry,
5213 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005214{
5215 int err;
5216
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005217 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005218 if (err)
5219 return err;
5220
5221 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5222 if (err)
5223 goto err_fib_node_entry_add;
5224
5225 return 0;
5226
5227err_fib_node_entry_add:
5228 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5229 return err;
5230}
5231
5232static void
5233mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5234 struct mlxsw_sp_fib6_entry *fib6_entry)
5235{
5236 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5237 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5238}
5239
5240static struct mlxsw_sp_fib6_entry *
5241mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5242 const struct rt6_info *rt)
5243{
5244 struct mlxsw_sp_fib6_entry *fib6_entry;
5245 struct mlxsw_sp_fib_node *fib_node;
5246 struct mlxsw_sp_fib *fib;
5247 struct mlxsw_sp_vr *vr;
5248
5249 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5250 if (!vr)
5251 return NULL;
5252 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5253
5254 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5255 sizeof(rt->rt6i_dst.addr),
5256 rt->rt6i_dst.plen);
5257 if (!fib_node)
5258 return NULL;
5259
5260 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5261 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5262
5263 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5264 rt->rt6i_metric == iter_rt->rt6i_metric &&
5265 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5266 return fib6_entry;
5267 }
5268
5269 return NULL;
5270}
5271
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005272static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5273 struct mlxsw_sp_fib6_entry *fib6_entry,
5274 bool replace)
5275{
5276 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5277 struct mlxsw_sp_fib6_entry *replaced;
5278
5279 if (!replace)
5280 return;
5281
5282 replaced = list_next_entry(fib6_entry, common.list);
5283
5284 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5285 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5286 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5287}
5288
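/* Add an IPv6 route. Source-specific routes are not supported and are
 * rejected with -EINVAL, unsupported routes are silently ignored, and a
 * route that fits an existing multipath entry is appended to that
 * entry's nexthop group instead of creating a new entry.
 */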
Ido Schimmel428b8512017-08-03 13:28:28 +02005289static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005290 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005291{
5292 struct mlxsw_sp_fib6_entry *fib6_entry;
5293 struct mlxsw_sp_fib_node *fib_node;
5294 int err;
5295
5296 if (mlxsw_sp->router->aborted)
5297 return 0;
5298
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005299 if (rt->rt6i_src.plen)
5300 return -EINVAL;
5301
Ido Schimmel428b8512017-08-03 13:28:28 +02005302 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5303 return 0;
5304
5305 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5306 &rt->rt6i_dst.addr,
5307 sizeof(rt->rt6i_dst.addr),
5308 rt->rt6i_dst.plen,
5309 MLXSW_SP_L3_PROTO_IPV6);
5310 if (IS_ERR(fib_node))
5311 return PTR_ERR(fib_node);
5312
5313 /* Before creating a new entry, try to append route to an existing
5314 * multipath entry.
5315 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005316 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005317 if (fib6_entry) {
5318 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5319 if (err)
5320 goto err_fib6_entry_nexthop_add;
5321 return 0;
5322 }
5323
5324 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5325 if (IS_ERR(fib6_entry)) {
5326 err = PTR_ERR(fib6_entry);
5327 goto err_fib6_entry_create;
5328 }
5329
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005330 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005331 if (err)
5332 goto err_fib6_node_entry_link;
5333
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005334 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5335
Ido Schimmel428b8512017-08-03 13:28:28 +02005336 return 0;
5337
5338err_fib6_node_entry_link:
5339 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5340err_fib6_entry_create:
5341err_fib6_entry_nexthop_add:
5342 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5343 return err;
5344}
5345
5346static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5347 struct rt6_info *rt)
5348{
5349 struct mlxsw_sp_fib6_entry *fib6_entry;
5350 struct mlxsw_sp_fib_node *fib_node;
5351
5352 if (mlxsw_sp->router->aborted)
5353 return;
5354
5355 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5356 return;
5357
5358 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5359 if (WARN_ON(!fib6_entry))
5360 return;
5361
 5362	/* If the route is part of a multipath entry, but not the last one
5363 * removed, then only reduce its nexthop group.
5364 */
5365 if (!list_is_singular(&fib6_entry->rt6_list)) {
5366 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5367 return;
5368 }
5369
5370 fib_node = fib6_entry->common.fib_node;
5371
5372 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5373 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5374 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5375}
5376
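/* Abort trap setup: bind each virtual router to a minimal LPM tree and
 * program a default (zero prefix length) route with an IP2ME action for
 * the given protocol, so that all packets are trapped to the CPU.
 */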
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005377static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5378 enum mlxsw_reg_ralxx_protocol proto,
5379 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005380{
5381 char ralta_pl[MLXSW_REG_RALTA_LEN];
5382 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005383 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005384
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005385 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005386 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5387 if (err)
5388 return err;
5389
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005390 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005391 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5392 if (err)
5393 return err;
5394
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005395 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005396 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005397 char raltb_pl[MLXSW_REG_RALTB_LEN];
5398 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005399
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005400 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005401 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5402 raltb_pl);
5403 if (err)
5404 return err;
5405
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005406 mlxsw_reg_ralue_pack(ralue_pl, proto,
5407 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005408 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5409 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5410 ralue_pl);
5411 if (err)
5412 return err;
5413 }
5414
5415 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005416}
5417
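/* IPv4 multicast routes and VIFs are reflected through the per-VR
 * mr4_table rather than through FIB entries; the VR is looked up (or
 * created) based on the table ID carried in the notification.
 */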
Yotam Gigid42b0962017-09-27 08:23:20 +02005418static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5419 struct mfc_entry_notifier_info *men_info,
5420 bool replace)
5421{
5422 struct mlxsw_sp_vr *vr;
5423
5424 if (mlxsw_sp->router->aborted)
5425 return 0;
5426
David Ahernf8fa9b42017-10-18 09:56:56 -07005427 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005428 if (IS_ERR(vr))
5429 return PTR_ERR(vr);
5430
5431 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5432}
5433
5434static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5435 struct mfc_entry_notifier_info *men_info)
5436{
5437 struct mlxsw_sp_vr *vr;
5438
5439 if (mlxsw_sp->router->aborted)
5440 return;
5441
5442 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5443 if (WARN_ON(!vr))
5444 return;
5445
5446 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005447 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005448}
5449
5450static int
5451mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5452 struct vif_entry_notifier_info *ven_info)
5453{
5454 struct mlxsw_sp_rif *rif;
5455 struct mlxsw_sp_vr *vr;
5456
5457 if (mlxsw_sp->router->aborted)
5458 return 0;
5459
David Ahernf8fa9b42017-10-18 09:56:56 -07005460 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005461 if (IS_ERR(vr))
5462 return PTR_ERR(vr);
5463
5464 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5465 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5466 ven_info->vif_index,
5467 ven_info->vif_flags, rif);
5468}
5469
5470static void
5471mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5472 struct vif_entry_notifier_info *ven_info)
5473{
5474 struct mlxsw_sp_vr *vr;
5475
5476 if (mlxsw_sp->router->aborted)
5477 return;
5478
5479 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5480 if (WARN_ON(!vr))
5481 return;
5482
5483 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
Ido Schimmel2b52ce02018-01-22 09:17:42 +01005484 mlxsw_sp_vr_put(mlxsw_sp, vr);
Yotam Gigid42b0962017-09-27 08:23:20 +02005485}
5486
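/* Install the abort trap for both IPv4 and IPv6, each on its own LPM tree
 * (MLXSW_SP_LPM_TREE_MIN and MLXSW_SP_LPM_TREE_MIN + 1). Multicast needs no
 * trap of its own; see the comment below.
 */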
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005487static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5488{
5489 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5490 int err;
5491
5492 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5493 MLXSW_SP_LPM_TREE_MIN);
5494 if (err)
5495 return err;
5496
Yotam Gigid42b0962017-09-27 08:23:20 +02005497 /* The multicast router code does not need an abort trap as by default,
5498 * packets that don't match any routes are trapped to the CPU.
5499 */
5500
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005501 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5502 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5503 MLXSW_SP_LPM_TREE_MIN + 1);
5504}
5505
Ido Schimmel9aecce12017-02-09 10:28:42 +01005506static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5507 struct mlxsw_sp_fib_node *fib_node)
5508{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005509 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005510
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005511 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5512 common.list) {
5513 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005514
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005515 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5516 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005517 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005518 /* Break when entry list is empty and node was freed.
5519 * Otherwise, we'll access freed memory in the next
5520 * iteration.
5521 */
5522 if (do_break)
5523 break;
5524 }
5525}
5526
Ido Schimmel428b8512017-08-03 13:28:28 +02005527static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5528 struct mlxsw_sp_fib_node *fib_node)
5529{
5530 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5531
5532 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5533 common.list) {
5534 bool do_break = &tmp->common.list == &fib_node->entry_list;
5535
5536 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5537 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5538 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5539 if (do_break)
5540 break;
5541 }
5542}
5543
Ido Schimmel9aecce12017-02-09 10:28:42 +01005544static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5545 struct mlxsw_sp_fib_node *fib_node)
5546{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005547 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005548 case MLXSW_SP_L3_PROTO_IPV4:
5549 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5550 break;
5551 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005552 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005553 break;
5554 }
5555}
5556
Ido Schimmel76610eb2017-03-10 08:53:41 +01005557static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5558 struct mlxsw_sp_vr *vr,
5559 enum mlxsw_sp_l3proto proto)
5560{
5561 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5562 struct mlxsw_sp_fib_node *fib_node, *tmp;
5563
5564 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5565 bool do_break = &tmp->list == &fib->node_list;
5566
5567 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5568 if (do_break)
5569 break;
5570 }
5571}
5572
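/* Flush all offloaded routes: for every virtual router in use, flush its
 * multicast table and its IPv4 FIB, and then, if the virtual router is
 * still in use, its IPv6 FIB as well.
 */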
Ido Schimmelac571de2016-11-14 11:26:32 +01005573static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005574{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005575 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005576
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005577 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005578 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005579
Ido Schimmel76610eb2017-03-10 08:53:41 +01005580 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005581 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005582
5583 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005584 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005585
5586		/* If the virtual router was only used for IPv4, then after the
5587		 * flush it is no longer in use and the IPv6 flush can be skipped.
5588		 */
5589 if (!mlxsw_sp_vr_is_used(vr))
5590 continue;
5591 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005592 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005593}
5594
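/* Abort FIB offload: flush everything that was offloaded, mark the router
 * as aborted so that subsequent routes are not offloaded, and install the
 * trap so that the kernel takes over forwarding.
 */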
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005595static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005596{
5597 int err;
5598
Ido Schimmel9011b672017-05-16 19:38:25 +02005599 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005600 return;
5601 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005602 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005603 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005604 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5605 if (err)
5606 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5607}
5608
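/* FIB notifications arrive in atomic context. The notifier copies the event
 * information into this work item, taking references where needed, and the
 * actual processing is deferred to process context under RTNL.
 */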
Ido Schimmel30572242016-12-03 16:45:01 +01005609struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005610 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005611 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005612 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005613 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005614 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005615 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005616 struct mfc_entry_notifier_info men_info;
5617 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005618 };
Ido Schimmel30572242016-12-03 16:45:01 +01005619 struct mlxsw_sp *mlxsw_sp;
5620 unsigned long event;
5621};
5622
Ido Schimmel66a57632017-08-03 13:28:26 +02005623static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005624{
Ido Schimmel30572242016-12-03 16:45:01 +01005625 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005626 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005627 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005628 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005629 int err;
5630
Ido Schimmel30572242016-12-03 16:45:01 +01005631 /* Protect internal structures from changes */
5632 rtnl_lock();
5633 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005634 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005635 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005636 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005637 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005638 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5639 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005640 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005641 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005642 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005643 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005644 break;
5645 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005646 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5647 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005648 break;
David Ahern1f279232017-10-27 17:37:14 -07005649 case FIB_EVENT_RULE_ADD:
5650		/* If we got here, a rule was added that we do not support.
5651		 * Just abort FIB offload.
5652		 */
5653 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005654 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005655 case FIB_EVENT_NH_ADD: /* fall through */
5656 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005657 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5658 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005659 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5660 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005661 }
Ido Schimmel30572242016-12-03 16:45:01 +01005662 rtnl_unlock();
5663 kfree(fib_work);
5664}
5665
Ido Schimmel66a57632017-08-03 13:28:26 +02005666static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5667{
Ido Schimmel583419f2017-08-03 13:28:27 +02005668 struct mlxsw_sp_fib_event_work *fib_work =
5669 container_of(work, struct mlxsw_sp_fib_event_work, work);
5670 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005671 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005672 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005673
5674 rtnl_lock();
5675 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005676 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005677 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005678 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005679 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005680 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005681 if (err)
5682 mlxsw_sp_router_fib_abort(mlxsw_sp);
5683 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5684 break;
5685 case FIB_EVENT_ENTRY_DEL:
5686 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5687 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5688 break;
David Ahern1f279232017-10-27 17:37:14 -07005689 case FIB_EVENT_RULE_ADD:
5690		/* If we got here, a rule was added that we do not support.
5691		 * Just abort FIB offload.
5692		 */
5693 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005694 break;
5695 }
5696 rtnl_unlock();
5697 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005698}
5699
Yotam Gigid42b0962017-09-27 08:23:20 +02005700static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5701{
5702 struct mlxsw_sp_fib_event_work *fib_work =
5703 container_of(work, struct mlxsw_sp_fib_event_work, work);
5704 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005705 bool replace;
5706 int err;
5707
5708 rtnl_lock();
5709 switch (fib_work->event) {
5710 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5711 case FIB_EVENT_ENTRY_ADD:
5712 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5713
5714 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5715 replace);
5716 if (err)
5717 mlxsw_sp_router_fib_abort(mlxsw_sp);
5718 ipmr_cache_put(fib_work->men_info.mfc);
5719 break;
5720 case FIB_EVENT_ENTRY_DEL:
5721 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5722 ipmr_cache_put(fib_work->men_info.mfc);
5723 break;
5724 case FIB_EVENT_VIF_ADD:
5725 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5726 &fib_work->ven_info);
5727 if (err)
5728 mlxsw_sp_router_fib_abort(mlxsw_sp);
5729 dev_put(fib_work->ven_info.dev);
5730 break;
5731 case FIB_EVENT_VIF_DEL:
5732 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5733 &fib_work->ven_info);
5734 dev_put(fib_work->ven_info.dev);
5735 break;
David Ahern1f279232017-10-27 17:37:14 -07005736 case FIB_EVENT_RULE_ADD:
5737		/* If we got here, a rule was added that we do not support.
5738		 * Just abort FIB offload.
5739		 */
5740 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005741 break;
5742 }
5743 rtnl_unlock();
5744 kfree(fib_work);
5745}
5746
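/* The helpers below run from the notifier itself. They copy the notifier
 * info into the work item and take a reference on the underlying object
 * (fib_info, rt6_info, MFC cache entry or netdevice) so that it cannot be
 * freed before the work item is processed.
 */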
Ido Schimmel66a57632017-08-03 13:28:26 +02005747static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5748 struct fib_notifier_info *info)
5749{
David Ahern3c75f9b2017-10-18 15:01:38 -07005750 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005751 struct fib_nh_notifier_info *fnh_info;
5752
Ido Schimmel66a57632017-08-03 13:28:26 +02005753 switch (fib_work->event) {
5754 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5755 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5756 case FIB_EVENT_ENTRY_ADD: /* fall through */
5757 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005758 fen_info = container_of(info, struct fib_entry_notifier_info,
5759 info);
5760 fib_work->fen_info = *fen_info;
5761 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005762 * freed while work is queued. Release it afterwards.
5763 */
5764 fib_info_hold(fib_work->fen_info.fi);
5765 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005766 case FIB_EVENT_NH_ADD: /* fall through */
5767 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005768 fnh_info = container_of(info, struct fib_nh_notifier_info,
5769 info);
5770 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005771 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5772 break;
5773 }
5774}
5775
5776static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5777 struct fib_notifier_info *info)
5778{
David Ahern3c75f9b2017-10-18 15:01:38 -07005779 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005780
Ido Schimmel583419f2017-08-03 13:28:27 +02005781 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005782 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005783 case FIB_EVENT_ENTRY_ADD: /* fall through */
5784 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005785 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5786 info);
5787 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005788 rt6_hold(fib_work->fen6_info.rt);
5789 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005790 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005791}
5792
Yotam Gigid42b0962017-09-27 08:23:20 +02005793static void
5794mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5795 struct fib_notifier_info *info)
5796{
5797 switch (fib_work->event) {
5798 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5799 case FIB_EVENT_ENTRY_ADD: /* fall through */
5800 case FIB_EVENT_ENTRY_DEL:
5801 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5802 ipmr_cache_hold(fib_work->men_info.mfc);
5803 break;
5804 case FIB_EVENT_VIF_ADD: /* fall through */
5805 case FIB_EVENT_VIF_DEL:
5806 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5807 dev_hold(fib_work->ven_info.dev);
5808 break;
David Ahern1f279232017-10-27 17:37:14 -07005809 }
5810}
5811
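/* FIB rules other than the default rules and l3mdev rules cannot be
 * honoured by the device. When such a rule is added, the caller queues work
 * that aborts FIB offload and the reason is reported via extack.
 */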
5812static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5813 struct fib_notifier_info *info,
5814 struct mlxsw_sp *mlxsw_sp)
5815{
5816 struct netlink_ext_ack *extack = info->extack;
5817 struct fib_rule_notifier_info *fr_info;
5818 struct fib_rule *rule;
5819 int err = 0;
5820
5821	/* Nothing to do on rule deletion at the moment. */
5822 if (event == FIB_EVENT_RULE_DEL)
5823 return 0;
5824
5825 if (mlxsw_sp->router->aborted)
5826 return 0;
5827
5828 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5829 rule = fr_info->rule;
5830
5831 switch (info->family) {
5832 case AF_INET:
5833 if (!fib4_rule_default(rule) && !rule->l3mdev)
5834 err = -1;
5835 break;
5836 case AF_INET6:
5837 if (!fib6_rule_default(rule) && !rule->l3mdev)
5838 err = -1;
5839 break;
5840 case RTNL_FAMILY_IPMR:
5841 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5842 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005843 break;
5844 }
David Ahern1f279232017-10-27 17:37:14 -07005845
5846 if (err < 0)
5847 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5848
5849 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005850}
5851
Ido Schimmel30572242016-12-03 16:45:01 +01005852/* Called with rcu_read_lock() */
5853static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5854 unsigned long event, void *ptr)
5855{
Ido Schimmel30572242016-12-03 16:45:01 +01005856 struct mlxsw_sp_fib_event_work *fib_work;
5857 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005858 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005859 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005860
Ido Schimmel8e29f972017-09-15 15:31:07 +02005861 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005862 (info->family != AF_INET && info->family != AF_INET6 &&
5863 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005864 return NOTIFY_DONE;
5865
David Ahern1f279232017-10-27 17:37:14 -07005866 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5867
5868 switch (event) {
5869 case FIB_EVENT_RULE_ADD: /* fall through */
5870 case FIB_EVENT_RULE_DEL:
5871 err = mlxsw_sp_router_fib_rule_event(event, info,
5872 router->mlxsw_sp);
5873 if (!err)
5874 return NOTIFY_DONE;
5875 }
5876
Ido Schimmel30572242016-12-03 16:45:01 +01005877 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5878 if (WARN_ON(!fib_work))
5879 return NOTIFY_BAD;
5880
Ido Schimmel7e39d112017-05-16 19:38:28 +02005881 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005882 fib_work->event = event;
5883
Ido Schimmel66a57632017-08-03 13:28:26 +02005884 switch (info->family) {
5885 case AF_INET:
5886 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5887 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005888 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005889 case AF_INET6:
5890 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5891 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005892 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005893 case RTNL_FAMILY_IPMR:
5894 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5895 mlxsw_sp_router_fibmr_event(fib_work, info);
5896 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005897 }
5898
Ido Schimmela0e47612017-02-06 16:20:10 +01005899 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005900
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005901 return NOTIFY_DONE;
5902}
5903
Ido Schimmel4724ba562017-03-10 08:53:39 +01005904static struct mlxsw_sp_rif *
5905mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5906 const struct net_device *dev)
5907{
5908 int i;
5909
5910 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005911 if (mlxsw_sp->router->rifs[i] &&
5912 mlxsw_sp->router->rifs[i]->dev == dev)
5913 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005914
5915 return NULL;
5916}
5917
5918static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5919{
5920 char ritr_pl[MLXSW_REG_RITR_LEN];
5921 int err;
5922
5923 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5924 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5925 if (WARN_ON_ONCE(err))
5926 return err;
5927
5928 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5929 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5930}
5931
5932static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005933 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005934{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005935 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5936 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5937 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005938}
5939
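/* Decide whether an address event should change the RIF configuration: on
 * NETDEV_UP a RIF is only created if one does not already exist, and on
 * NETDEV_DOWN the RIF is only destroyed once the netdev has neither IPv4
 * nor IPv6 addresses left and is not an L3 slave.
 */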
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005940static bool
5941mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5942 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005943{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005944 struct inet6_dev *inet6_dev;
5945 bool addr_list_empty = true;
5946 struct in_device *idev;
5947
Ido Schimmel4724ba562017-03-10 08:53:39 +01005948 switch (event) {
5949 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005950 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005951 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005952 idev = __in_dev_get_rtnl(dev);
5953 if (idev && idev->ifa_list)
5954 addr_list_empty = false;
5955
5956 inet6_dev = __in6_dev_get(dev);
5957 if (addr_list_empty && inet6_dev &&
5958 !list_empty(&inet6_dev->addr_list))
5959 addr_list_empty = false;
5960
5961 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005962 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005963 return true;
5964 /* It is possible we already removed the RIF ourselves
5965 * if it was assigned to a netdev that is now a bridge
5966 * or LAG slave.
5967 */
5968 return false;
5969 }
5970
5971 return false;
5972}
5973
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005974static enum mlxsw_sp_rif_type
5975mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5976 const struct net_device *dev)
5977{
5978 enum mlxsw_sp_fid_type type;
5979
Petr Machata6ddb7422017-09-02 23:49:19 +02005980 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5981 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5982
5983 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005984 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5985 type = MLXSW_SP_FID_TYPE_8021Q;
5986 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5987 type = MLXSW_SP_FID_TYPE_8021Q;
5988 else if (netif_is_bridge_master(dev))
5989 type = MLXSW_SP_FID_TYPE_8021D;
5990 else
5991 type = MLXSW_SP_FID_TYPE_RFID;
5992
5993 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5994}
5995
Ido Schimmelde5ed992017-06-04 16:53:40 +02005996static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005997{
5998 int i;
5999
Ido Schimmelde5ed992017-06-04 16:53:40 +02006000 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6001 if (!mlxsw_sp->router->rifs[i]) {
6002 *p_rif_index = i;
6003 return 0;
6004 }
6005 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006006
Ido Schimmelde5ed992017-06-04 16:53:40 +02006007 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006008}
6009
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006010static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6011 u16 vr_id,
6012 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006013{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006014 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006015
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006016 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006017 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006018 return NULL;
6019
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006020 INIT_LIST_HEAD(&rif->nexthop_list);
6021 INIT_LIST_HEAD(&rif->neigh_list);
6022 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6023 rif->mtu = l3_dev->mtu;
6024 rif->vr_id = vr_id;
6025 rif->dev = l3_dev;
6026 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006027
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006028 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006029}
6030
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006031struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6032 u16 rif_index)
6033{
6034 return mlxsw_sp->router->rifs[rif_index];
6035}
6036
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006037u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6038{
6039 return rif->rif_index;
6040}
6041
Petr Machata92107cf2017-09-02 23:49:28 +02006042u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6043{
6044 return lb_rif->common.rif_index;
6045}
6046
6047u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6048{
6049 return lb_rif->ul_vr_id;
6050}
6051
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02006052int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6053{
6054 return rif->dev->ifindex;
6055}
6056
Yotam Gigi91e4d592017-09-19 10:00:19 +02006057const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6058{
6059 return rif->dev;
6060}
6061
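/* Create a router interface (RIF) for a netdev: derive the RIF type from
 * the netdev, bind the RIF to a virtual router, get the backing FID where
 * one is needed and let the per-type ops configure the device.
 */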
Ido Schimmel4724ba562017-03-10 08:53:39 +01006062static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006063mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006064 const struct mlxsw_sp_rif_params *params,
6065 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006066{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006067 u32 tb_id = l3mdev_fib_table(params->dev);
6068 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02006069 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006070 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006071 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006072 struct mlxsw_sp_vr *vr;
6073 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006074 int err;
6075
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006076 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6077 ops = mlxsw_sp->router->rif_ops_arr[type];
6078
David Ahernf8fa9b42017-10-18 09:56:56 -07006079 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006080 if (IS_ERR(vr))
6081 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006082 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006083
Ido Schimmelde5ed992017-06-04 16:53:40 +02006084 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006085 if (err) {
6086 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006087 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006088 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006089
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006090 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006091 if (!rif) {
6092 err = -ENOMEM;
6093 goto err_rif_alloc;
6094 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006095 rif->mlxsw_sp = mlxsw_sp;
6096 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006097
Petr Machata010cadf2017-09-02 23:49:18 +02006098 if (ops->fid_get) {
6099 fid = ops->fid_get(rif);
6100 if (IS_ERR(fid)) {
6101 err = PTR_ERR(fid);
6102 goto err_fid_get;
6103 }
6104 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006105 }
6106
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006107 if (ops->setup)
6108 ops->setup(rif, params);
6109
6110 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006111 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006112 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006113
Yotam Gigid42b0962017-09-27 08:23:20 +02006114 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
6115 if (err)
6116 goto err_mr_rif_add;
6117
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006118 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006119 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006120
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006121 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006122
Yotam Gigid42b0962017-09-27 08:23:20 +02006123err_mr_rif_add:
6124 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006125err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006126 if (fid)
6127 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006128err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006129 kfree(rif);
6130err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006131err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006132 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006133 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006134 return ERR_PTR(err);
6135}
6136
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006137void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006138{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006139 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6140 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006141 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006142 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006143
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006144 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006145 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006146
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006147 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006148 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006149 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006150 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006151 if (fid)
6152 /* Loopback RIFs are not associated with a FID. */
6153 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006154 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006155 vr->rif_count--;
Ido Schimmel2b52ce02018-01-22 09:17:42 +01006156 mlxsw_sp_vr_put(mlxsw_sp, vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006157}
6158
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006159static void
6160mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6161 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6162{
6163 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6164
6165 params->vid = mlxsw_sp_port_vlan->vid;
6166 params->lag = mlxsw_sp_port->lagged;
6167 if (params->lag)
6168 params->lag_id = mlxsw_sp_port->lag_id;
6169 else
6170 params->system_port = mlxsw_sp_port->local_port;
6171}
6172
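/* Join a {port, VID} to the router: create a sub-port RIF for the L3 device
 * if one does not exist yet, take a reference on the RIF's FID and map the
 * {port, VID} to it, then disable learning for the VID and put it in the
 * forwarding STP state.
 */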
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006173static int
Ido Schimmela1107482017-05-26 08:37:39 +02006174mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006175 struct net_device *l3_dev,
6176 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006177{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006178 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006179 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006180 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006181 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006182 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006183 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006184
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006185 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006186 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006187 struct mlxsw_sp_rif_params params = {
6188 .dev = l3_dev,
6189 };
6190
6191 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006192 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006193 if (IS_ERR(rif))
6194 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006195 }
6196
Ido Schimmela1107482017-05-26 08:37:39 +02006197 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006198 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006199 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6200 if (err)
6201 goto err_fid_port_vid_map;
6202
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006203 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006204 if (err)
6205 goto err_port_vid_learning_set;
6206
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006207 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006208 BR_STATE_FORWARDING);
6209 if (err)
6210 goto err_port_vid_stp_set;
6211
Ido Schimmela1107482017-05-26 08:37:39 +02006212 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006213
Ido Schimmel4724ba562017-03-10 08:53:39 +01006214 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006215
6216err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006217 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006218err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006219 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6220err_fid_port_vid_map:
6221 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006222 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006223}
6224
Ido Schimmela1107482017-05-26 08:37:39 +02006225void
6226mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006227{
Ido Schimmelce95e152017-05-26 08:37:27 +02006228 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006229 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006230 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006231
Ido Schimmela1107482017-05-26 08:37:39 +02006232 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6233 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006234
Ido Schimmela1107482017-05-26 08:37:39 +02006235 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006236 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6237 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006238 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6239 /* If router port holds the last reference on the rFID, then the
6240 * associated Sub-port RIF will be destroyed.
6241 */
6242 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006243}
6244
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006245static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6246 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006247 unsigned long event, u16 vid,
6248 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006249{
6250 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006251 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006252
Ido Schimmelce95e152017-05-26 08:37:27 +02006253 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006254 if (WARN_ON(!mlxsw_sp_port_vlan))
6255 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006256
6257 switch (event) {
6258 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006259 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006260 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006261 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006262 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006263 break;
6264 }
6265
6266 return 0;
6267}
6268
6269static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006270 unsigned long event,
6271 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006272{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006273 if (netif_is_bridge_port(port_dev) ||
6274 netif_is_lag_port(port_dev) ||
6275 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006276 return 0;
6277
David Ahernf8fa9b42017-10-18 09:56:56 -07006278 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6279 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006280}
6281
6282static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6283 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006284 unsigned long event, u16 vid,
6285 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006286{
6287 struct net_device *port_dev;
6288 struct list_head *iter;
6289 int err;
6290
6291 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6292 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006293 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6294 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006295 event, vid,
6296 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006297 if (err)
6298 return err;
6299 }
6300 }
6301
6302 return 0;
6303}
6304
6305static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006306 unsigned long event,
6307 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006308{
6309 if (netif_is_bridge_port(lag_dev))
6310 return 0;
6311
David Ahernf8fa9b42017-10-18 09:56:56 -07006312 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6313 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006314}
6315
Ido Schimmel4724ba562017-03-10 08:53:39 +01006316static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006317 unsigned long event,
6318 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006319{
6320 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006321 struct mlxsw_sp_rif_params params = {
6322 .dev = l3_dev,
6323 };
Ido Schimmela1107482017-05-26 08:37:39 +02006324 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006325
6326 switch (event) {
6327 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006328 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006329 if (IS_ERR(rif))
6330 return PTR_ERR(rif);
6331 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006332 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006333 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006334 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006335 break;
6336 }
6337
6338 return 0;
6339}
6340
6341static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006342 unsigned long event,
6343 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006344{
6345 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006346 u16 vid = vlan_dev_vlan_id(vlan_dev);
6347
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006348 if (netif_is_bridge_port(vlan_dev))
6349 return 0;
6350
Ido Schimmel4724ba562017-03-10 08:53:39 +01006351 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006352 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006353 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006354 else if (netif_is_lag_master(real_dev))
6355 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006356 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006357 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006358 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006359
6360 return 0;
6361}
6362
Ido Schimmelb1e45522017-04-30 19:47:14 +03006363static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006364 unsigned long event,
6365 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006366{
6367 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006368 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006369 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006370 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006371 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006372 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006373 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006374 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006375 else
6376 return 0;
6377}
6378
Ido Schimmel4724ba562017-03-10 08:53:39 +01006379int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6380 unsigned long event, void *ptr)
6381{
6382 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6383 struct net_device *dev = ifa->ifa_dev->dev;
6384 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006385 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006386 int err = 0;
6387
David Ahern89d5dd22017-10-18 09:56:55 -07006388 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6389 if (event == NETDEV_UP)
6390 goto out;
6391
6392 mlxsw_sp = mlxsw_sp_lower_get(dev);
6393 if (!mlxsw_sp)
6394 goto out;
6395
6396 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6397 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6398 goto out;
6399
David Ahernf8fa9b42017-10-18 09:56:56 -07006400 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006401out:
6402 return notifier_from_errno(err);
6403}
6404
6405int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6406 unsigned long event, void *ptr)
6407{
6408 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6409 struct net_device *dev = ivi->ivi_dev->dev;
6410 struct mlxsw_sp *mlxsw_sp;
6411 struct mlxsw_sp_rif *rif;
6412 int err = 0;
6413
Ido Schimmel4724ba562017-03-10 08:53:39 +01006414 mlxsw_sp = mlxsw_sp_lower_get(dev);
6415 if (!mlxsw_sp)
6416 goto out;
6417
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006418 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006419 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006420 goto out;
6421
David Ahernf8fa9b42017-10-18 09:56:56 -07006422 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006423out:
6424 return notifier_from_errno(err);
6425}
6426
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006427struct mlxsw_sp_inet6addr_event_work {
6428 struct work_struct work;
6429 struct net_device *dev;
6430 unsigned long event;
6431};
6432
6433static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6434{
6435 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6436 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6437 struct net_device *dev = inet6addr_work->dev;
6438 unsigned long event = inet6addr_work->event;
6439 struct mlxsw_sp *mlxsw_sp;
6440 struct mlxsw_sp_rif *rif;
6441
6442 rtnl_lock();
6443 mlxsw_sp = mlxsw_sp_lower_get(dev);
6444 if (!mlxsw_sp)
6445 goto out;
6446
6447 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6448 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6449 goto out;
6450
David Ahernf8fa9b42017-10-18 09:56:56 -07006451 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006452out:
6453 rtnl_unlock();
6454 dev_put(dev);
6455 kfree(inet6addr_work);
6456}
6457
6458/* Called with rcu_read_lock() */
6459int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6460 unsigned long event, void *ptr)
6461{
6462 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6463 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6464 struct net_device *dev = if6->idev->dev;
6465
David Ahern89d5dd22017-10-18 09:56:55 -07006466 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6467 if (event == NETDEV_UP)
6468 return NOTIFY_DONE;
6469
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006470 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6471 return NOTIFY_DONE;
6472
6473 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6474 if (!inet6addr_work)
6475 return NOTIFY_BAD;
6476
6477 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6478 inet6addr_work->dev = dev;
6479 inet6addr_work->event = event;
6480 dev_hold(dev);
6481 mlxsw_core_schedule_work(&inet6addr_work->work);
6482
6483 return NOTIFY_DONE;
6484}
6485
David Ahern89d5dd22017-10-18 09:56:55 -07006486int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6487 unsigned long event, void *ptr)
6488{
6489 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6490 struct net_device *dev = i6vi->i6vi_dev->dev;
6491 struct mlxsw_sp *mlxsw_sp;
6492 struct mlxsw_sp_rif *rif;
6493 int err = 0;
6494
6495 mlxsw_sp = mlxsw_sp_lower_get(dev);
6496 if (!mlxsw_sp)
6497 goto out;
6498
6499 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6500 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6501 goto out;
6502
David Ahernf8fa9b42017-10-18 09:56:56 -07006503 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006504out:
6505 return notifier_from_errno(err);
6506}
6507
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006508static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006509 const char *mac, int mtu)
6510{
6511 char ritr_pl[MLXSW_REG_RITR_LEN];
6512 int err;
6513
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006514 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006515 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6516 if (err)
6517 return err;
6518
6519 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6520 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6521 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6522 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6523}
6524
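/* Handle MAC or MTU changes on a netdev that backs a RIF: remove the FDB
 * entry for the old MAC, update the RIF with the new MAC and MTU, install
 * a new FDB entry and propagate the new MTU to the multicast routing table.
 */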
6525int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6526{
6527 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006528 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006529 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006530 int err;
6531
6532 mlxsw_sp = mlxsw_sp_lower_get(dev);
6533 if (!mlxsw_sp)
6534 return 0;
6535
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006536 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6537 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006538 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006539 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006540
Ido Schimmela1107482017-05-26 08:37:39 +02006541 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006542 if (err)
6543 return err;
6544
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006545 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6546 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006547 if (err)
6548 goto err_rif_edit;
6549
Ido Schimmela1107482017-05-26 08:37:39 +02006550 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006551 if (err)
6552 goto err_rif_fdb_op;
6553
Yotam Gigifd890fe2017-09-27 08:23:21 +02006554 if (rif->mtu != dev->mtu) {
6555 struct mlxsw_sp_vr *vr;
6556
6557 /* The RIF is relevant only to its mr_table instance, as unlike
6558 * unicast routing, in multicast routing a RIF cannot be shared
6559 * between several multicast routing tables.
6560 */
6561 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6562 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6563 }
6564
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006565 ether_addr_copy(rif->addr, dev->dev_addr);
6566 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006567
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006568 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006569
6570 return 0;
6571
6572err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006573 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006574err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006575 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006576 return err;
6577}
6578
Ido Schimmelb1e45522017-04-30 19:47:14 +03006579static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006580 struct net_device *l3_dev,
6581 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006582{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006583 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006584
Ido Schimmelb1e45522017-04-30 19:47:14 +03006585 /* If netdev is already associated with a RIF, then we need to
6586 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006587 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006588 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6589 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006590 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006591
David Ahernf8fa9b42017-10-18 09:56:56 -07006592 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006593}
6594
Ido Schimmelb1e45522017-04-30 19:47:14 +03006595static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6596 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006597{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006598 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006599
Ido Schimmelb1e45522017-04-30 19:47:14 +03006600 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6601 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006602 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006603 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006604}
6605
Ido Schimmelb1e45522017-04-30 19:47:14 +03006606int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6607 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006608{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006609 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6610 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006611
Ido Schimmelb1e45522017-04-30 19:47:14 +03006612 if (!mlxsw_sp)
6613 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006614
Ido Schimmelb1e45522017-04-30 19:47:14 +03006615 switch (event) {
6616 case NETDEV_PRECHANGEUPPER:
6617 return 0;
6618 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006619 if (info->linking) {
6620 struct netlink_ext_ack *extack;
6621
6622 extack = netdev_notifier_info_to_extack(&info->info);
6623 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6624 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006625 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006626 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006627 break;
6628 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006629
Ido Schimmelb1e45522017-04-30 19:47:14 +03006630 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006631}
6632
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006633static struct mlxsw_sp_rif_subport *
6634mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006635{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006636 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006637}
6638
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006639static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6640 const struct mlxsw_sp_rif_params *params)
6641{
6642 struct mlxsw_sp_rif_subport *rif_subport;
6643
6644 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6645 rif_subport->vid = params->vid;
6646 rif_subport->lag = params->lag;
6647 if (params->lag)
6648 rif_subport->lag_id = params->lag_id;
6649 else
6650 rif_subport->system_port = params->system_port;
6651}
6652
6653static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6654{
6655 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6656 struct mlxsw_sp_rif_subport *rif_subport;
6657 char ritr_pl[MLXSW_REG_RITR_LEN];
6658
6659 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6660 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006661 rif->rif_index, rif->vr_id, rif->dev->mtu);
6662 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006663 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6664 rif_subport->lag ? rif_subport->lag_id :
6665 rif_subport->system_port,
6666 rif_subport->vid);
6667
6668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6669}
6670
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type		= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size	= sizeof(struct mlxsw_sp_rif_subport),
	.setup		= mlxsw_sp_rif_subport_setup,
	.configure	= mlxsw_sp_rif_subport_configure,
	.deconfigure	= mlxsw_sp_rif_subport_deconfigure,
	.fid_get	= mlxsw_sp_rif_subport_fid_get,
};

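/* Shared helper for the VLAN and FID RIF flavours: both are programmed
 * through the same RITR register and differ only in the interface type
 * and in whether a VID or a FID index is packed into it.
 */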
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

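/* The router port is a virtual local port, numbered one past the last
 * real port, that is used below as the flood destination representing
 * the router in per-FID flood tables.
 */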
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

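/* A VLAN RIF is backed by an 802.1Q FID. In addition to enabling the
 * RITR entry, configure() adds the router port to the FID's MC and BC
 * flood tables and installs the RIF's MAC in the FDB, so that flooded
 * traffic and packets addressed to the router actually reach it.
 */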
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type		= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size	= sizeof(struct mlxsw_sp_rif),
	.configure	= mlxsw_sp_rif_vlan_configure,
	.deconfigure	= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get	= mlxsw_sp_rif_vlan_fid_get,
};

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type		= MLXSW_SP_RIF_TYPE_FID,
	.rif_size	= sizeof(struct mlxsw_sp_rif),
	.configure	= mlxsw_sp_rif_fid_configure,
	.deconfigure	= mlxsw_sp_rif_fid_deconfigure,
	.fid_get	= mlxsw_sp_rif_fid_fid_get,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

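/* An IP-in-IP loopback RIF is bound to the virtual router of the
 * tunnel's underlay table: take a reference on that VR, program the
 * loopback RIF against it and record its ID for the matching
 * deconfigure() below.
 */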
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type		= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size	= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup		= mlxsw_sp_rif_ipip_lb_setup,
	.configure	= mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure	= mlxsw_sp_rif_ipip_lb_deconfigure,
};

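/* Dispatch table mapping each RIF type to its operations; it is hooked
 * up to the router in mlxsw_sp_rifs_init() below.
 */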
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

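/* Global IP-in-IP tunneling configuration. The TIGCR register controls
 * TTL handling for tunneled packets; the values packed below select the
 * TTL-copy behaviour the driver relies on (see the TIGCR definition in
 * reg.h for the exact field semantics).
 */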
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

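/* ECMP hash configuration. When the kernel supports multipath routing,
 * the hash inputs are programmed through the RECR2 register using a
 * random seed; otherwise there is nothing to set up and the init call
 * is a stub.
 */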
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

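/* For IPv4, always hash on the source and destination addresses. The
 * protocol and L4 ports are added only when the kernel's
 * fib_multipath_hash_policy sysctl selects L4 hashing, so that the
 * device's ECMP choice stays consistent with the kernel's.
 */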
static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW determines switch priority based on DSCP bits, but the kernel
	 * still does so based on the full ToS field. Since the two differ
	 * by the two least-significant ECN bits, translate each DSCP value
	 * to the ToS the kernel would observe by shifting past those ECN
	 * bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

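/* Enable the router in hardware: the RGCR write turns on IPv4 and IPv6
 * routing, caps the number of router interfaces at the MAX_RIFS
 * resource and sets the usp bit so that routed packets get their switch
 * priority updated (this pairs with the DSCP map programmed in
 * mlxsw_sp_dscp_init()).
 */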
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

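/* Top-level router initialization: allocate the router struct and bring
 * up the sub-blocks (RIFs, IP-in-IP, nexthop tables, LPM, multicast
 * routing, virtual routers, neighbour handling, notifiers) in order.
 * The error labels unwind in exact reverse order, and
 * mlxsw_sp_router_fini() mirrors the same sequence.
 */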
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_dscp_init:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}