blob: 867d3db2c1270898029a96b3a2e4467a0c48793e [file] [log] [blame]
Ido Schimmel464dce12016-07-02 11:00:15 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
Petr Machatae437f3b2018-02-13 11:26:09 +01003 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
Ido Schimmel464dce12016-07-02 11:00:15 +02004 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
Yotam Gigic723c7352016-07-05 11:27:43 +02006 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
Petr Machatae437f3b2018-02-13 11:26:09 +01007 * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
Ido Schimmel464dce12016-07-02 11:00:15 +02008 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/kernel.h>
39#include <linux/types.h>
Jiri Pirko5e9c16c2016-07-04 08:23:04 +020040#include <linux/rhashtable.h>
41#include <linux/bitops.h>
42#include <linux/in6.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020043#include <linux/notifier.h>
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +010044#include <linux/inetdevice.h>
Ido Schimmel9db032b2017-03-16 09:08:17 +010045#include <linux/netdevice.h>
Ido Schimmel03ea01e2017-05-23 21:56:30 +020046#include <linux/if_bridge.h>
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +020047#include <linux/socket.h>
Ido Schimmel428b8512017-08-03 13:28:28 +020048#include <linux/route.h>
Ido Schimmeleb789982017-10-22 23:11:48 +020049#include <linux/gcd.h>
Ido Schimmelaf658b62017-11-02 17:14:09 +010050#include <linux/random.h>
Ido Schimmel2db99372018-07-14 11:39:52 +030051#include <linux/if_macvlan.h>
Yotam Gigic723c7352016-07-05 11:27:43 +020052#include <net/netevent.h>
Jiri Pirko6cf3c972016-07-05 11:27:39 +020053#include <net/neighbour.h>
54#include <net/arp.h>
Jiri Pirkob45f64d2016-09-26 12:52:31 +020055#include <net/ip_fib.h>
Ido Schimmel583419f2017-08-03 13:28:27 +020056#include <net/ip6_fib.h>
Ido Schimmel5d7bfd12017-03-16 09:08:14 +010057#include <net/fib_rules.h>
Petr Machata6ddb7422017-09-02 23:49:19 +020058#include <net/ip_tunnels.h>
Ido Schimmel57837882017-03-16 09:08:16 +010059#include <net/l3mdev.h>
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +020060#include <net/addrconf.h>
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +020061#include <net/ndisc.h>
62#include <net/ipv6.h>
Ido Schimmel04b1d4e2017-08-03 13:28:11 +020063#include <net/fib_notifier.h>
Ido Schimmel2db99372018-07-14 11:39:52 +030064#include <net/switchdev.h>
Ido Schimmel464dce12016-07-02 11:00:15 +020065
66#include "spectrum.h"
67#include "core.h"
68#include "reg.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020069#include "spectrum_cnt.h"
70#include "spectrum_dpipe.h"
Petr Machata38ebc0f2017-09-02 23:49:17 +020071#include "spectrum_ipip.h"
Yotam Gigid42b0962017-09-27 08:23:20 +020072#include "spectrum_mr.h"
73#include "spectrum_mr_tcam.h"
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +020074#include "spectrum_router.h"
Petr Machata803335a2018-02-27 14:53:46 +010075#include "spectrum_span.h"
Ido Schimmel464dce12016-07-02 11:00:15 +020076
Ido Schimmel2b52ce02018-01-22 09:17:42 +010077struct mlxsw_sp_fib;
Ido Schimmel9011b672017-05-16 19:38:25 +020078struct mlxsw_sp_vr;
79struct mlxsw_sp_lpm_tree;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +020080struct mlxsw_sp_rif_ops;
Ido Schimmel9011b672017-05-16 19:38:25 +020081
/* Per-ASIC router state: RIF and VR arrays, neighbour/nexthop hash
 * tables, LPM tree bookkeeping, and the periodic neighbour/nexthop
 * update work items.
 */
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;	/* indexed by RIF index */
	struct mlxsw_sp_vr *vrs;	/* virtual router array */
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		/* One tree for each protocol: IPv4 and IPv6 */
		struct mlxsw_sp_lpm_tree *proto_trees[2];
		struct mlxsw_sp_lpm_tree *trees;	/* all HW trees */
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;	/* true after a failed FIB offload; see fib_nb use */
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
110
Ido Schimmel4724ba562017-03-10 08:53:39 +0100111struct mlxsw_sp_rif {
112 struct list_head nexthop_list;
113 struct list_head neigh_list;
114 struct net_device *dev;
Ido Schimmela1107482017-05-26 08:37:39 +0200115 struct mlxsw_sp_fid *fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +0100116 unsigned char addr[ETH_ALEN];
117 int mtu;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +0100118 u16 rif_index;
Ido Schimmel69132292017-03-10 08:53:42 +0100119 u16 vr_id;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200120 const struct mlxsw_sp_rif_ops *ops;
121 struct mlxsw_sp *mlxsw_sp;
122
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200123 unsigned int counter_ingress;
124 bool counter_ingress_valid;
125 unsigned int counter_egress;
126 bool counter_egress_valid;
Ido Schimmel4724ba562017-03-10 08:53:39 +0100127};
128
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200129struct mlxsw_sp_rif_params {
130 struct net_device *dev;
131 union {
132 u16 system_port;
133 u16 lag_id;
134 };
135 u16 vid;
136 bool lag;
137};
138
Ido Schimmel4d93cee2017-05-26 08:37:34 +0200139struct mlxsw_sp_rif_subport {
140 struct mlxsw_sp_rif common;
141 union {
142 u16 system_port;
143 u16 lag_id;
144 };
145 u16 vid;
146 bool lag;
147};
148
Petr Machata6ddb7422017-09-02 23:49:19 +0200149struct mlxsw_sp_rif_ipip_lb {
150 struct mlxsw_sp_rif common;
151 struct mlxsw_sp_rif_ipip_lb_config lb_config;
152 u16 ul_vr_id; /* Reserved for Spectrum-2. */
153};
154
/* RIF creation parameters for the IP-in-IP loopback RIF type. */
struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;	/* must be first */
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};
159
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200160struct mlxsw_sp_rif_ops {
161 enum mlxsw_sp_rif_type type;
162 size_t rif_size;
163
164 void (*setup)(struct mlxsw_sp_rif *rif,
165 const struct mlxsw_sp_rif_params *params);
166 int (*configure)(struct mlxsw_sp_rif *rif);
167 void (*deconfigure)(struct mlxsw_sp_rif *rif);
Petr Machata5f15e252018-06-25 10:48:13 +0300168 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
169 struct netlink_ext_ack *extack);
Ido Schimmel2db99372018-07-14 11:39:52 +0300170 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200171};
172
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100173static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
174static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
175 struct mlxsw_sp_lpm_tree *lpm_tree);
176static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
177 const struct mlxsw_sp_fib *fib,
178 u8 tree_id);
179static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
180 const struct mlxsw_sp_fib *fib);
181
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +0200182static unsigned int *
183mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
184 enum mlxsw_sp_rif_counter_dir dir)
185{
186 switch (dir) {
187 case MLXSW_SP_RIF_COUNTER_EGRESS:
188 return &rif->counter_egress;
189 case MLXSW_SP_RIF_COUNTER_INGRESS:
190 return &rif->counter_ingress;
191 }
192 return NULL;
193}
194
195static bool
196mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
197 enum mlxsw_sp_rif_counter_dir dir)
198{
199 switch (dir) {
200 case MLXSW_SP_RIF_COUNTER_EGRESS:
201 return rif->counter_egress_valid;
202 case MLXSW_SP_RIF_COUNTER_INGRESS:
203 return rif->counter_ingress_valid;
204 }
205 return false;
206}
207
208static void
209mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
210 enum mlxsw_sp_rif_counter_dir dir,
211 bool valid)
212{
213 switch (dir) {
214 case MLXSW_SP_RIF_COUNTER_EGRESS:
215 rif->counter_egress_valid = valid;
216 break;
217 case MLXSW_SP_RIF_COUNTER_INGRESS:
218 rif->counter_ingress_valid = valid;
219 break;
220 }
221}
222
223static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
224 unsigned int counter_index, bool enable,
225 enum mlxsw_sp_rif_counter_dir dir)
226{
227 char ritr_pl[MLXSW_REG_RITR_LEN];
228 bool is_egress = false;
229 int err;
230
231 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
232 is_egress = true;
233 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
234 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
235 if (err)
236 return err;
237
238 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
239 is_egress);
240 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
241}
242
/* Read the current value of the RIF counter for @dir into @cnt.
 *
 * Returns 0 on success, -EINVAL if no counter is bound for @dir, or the
 * error from the RICNT register query. Only the good-unicast-packets
 * field of the counter set is reported.
 */
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	/* NOP opcode: read the counter without clearing it. */
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
267
268static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
269 unsigned int counter_index)
270{
271 char ricnt_pl[MLXSW_REG_RICNT_LEN];
272
273 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
274 MLXSW_REG_RICNT_OPCODE_CLEAR);
275 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
276}
277
/* Allocate a counter for @rif in direction @dir, zero it, and bind it
 * to the RIF in hardware. On success the RIF's valid flag for @dir is
 * set; on failure the counter is returned to the pool.
 */
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	/* Reserve an index from the RIF counter sub-pool. */
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	/* Start from zero; the pool may hand back a stale counter. */
	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
310
/* Unbind and release the RIF counter for @dir, if one is bound.
 * Safe to call when no counter was allocated (it is then a no-op).
 */
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	/* Cannot be NULL here: valid_get() already vetted @dir. */
	if (WARN_ON(!p_counter_index))
		return;
	/* Unbind from the RIF first, then return the index to the pool. */
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
329
Ido Schimmele4f3c1c2017-05-26 08:37:40 +0200330static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
331{
332 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
333 struct devlink *devlink;
334
335 devlink = priv_to_devlink(mlxsw_sp->core);
336 if (!devlink_dpipe_table_counter_enabled(devlink,
337 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
338 return;
339 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
340}
341
342static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
343{
344 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
345
346 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
347}
348
Ido Schimmel7dcc18a2017-07-18 10:10:30 +0200349#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
Ido Schimmel9011b672017-05-16 19:38:25 +0200350
/* Bitmap of prefix lengths in use; bit N set means some /N route
 * exists. Sized for IPv6 (0..128 inclusive).
 */
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};
354
Jiri Pirko53342022016-07-04 08:23:08 +0200355#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
356 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
357
358static bool
359mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
360 struct mlxsw_sp_prefix_usage *prefix_usage2)
361{
362 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
363}
364
Jiri Pirko6b75c482016-07-04 08:23:09 +0200365static void
366mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
367 struct mlxsw_sp_prefix_usage *prefix_usage2)
368{
369 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
370}
371
372static void
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200373mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
374 unsigned char prefix_len)
375{
376 set_bit(prefix_len, prefix_usage->b);
377}
378
379static void
380mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
381 unsigned char prefix_len)
382{
383 clear_bit(prefix_len, prefix_usage->b);
384}
385
/* FIB node hash key: the route address (IPv4 or IPv6 share the larger
 * IPv6-sized buffer) plus its prefix length.
 */
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};
390
Jiri Pirko61c503f2016-07-04 08:23:11 +0200391enum mlxsw_sp_fib_entry_type {
392 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
393 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
394 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
Petr Machata4607f6d2017-09-02 23:49:25 +0200395
396 /* This is a special case of local delivery, where a packet should be
397 * decapsulated on reception. Note that there is no corresponding ENCAP,
398 * because that's a type of next hop, not of FIB entry. (There can be
399 * several next hops in a REMOTE entry, and some of them may be
400 * encapsulating entries.)
401 */
402 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
Jiri Pirko61c503f2016-07-04 08:23:11 +0200403};
404
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200405struct mlxsw_sp_nexthop_group;
406
Ido Schimmel9aecce12017-02-09 10:28:42 +0100407struct mlxsw_sp_fib_node {
408 struct list_head entry_list;
Jiri Pirkob45f64d2016-09-26 12:52:31 +0200409 struct list_head list;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100410 struct rhash_head ht_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100411 struct mlxsw_sp_fib *fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100412 struct mlxsw_sp_fib_key key;
413};
414
Petr Machata4607f6d2017-09-02 23:49:25 +0200415struct mlxsw_sp_fib_entry_decap {
416 struct mlxsw_sp_ipip_entry *ipip_entry;
417 u32 tunnel_index;
418};
419
Ido Schimmel9aecce12017-02-09 10:28:42 +0100420struct mlxsw_sp_fib_entry {
421 struct list_head list;
422 struct mlxsw_sp_fib_node *fib_node;
423 enum mlxsw_sp_fib_entry_type type;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +0200424 struct list_head nexthop_group_node;
425 struct mlxsw_sp_nexthop_group *nh_group;
Petr Machata4607f6d2017-09-02 23:49:25 +0200426 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200427};
428
Ido Schimmel4f1c7f12017-07-18 10:10:26 +0200429struct mlxsw_sp_fib4_entry {
430 struct mlxsw_sp_fib_entry common;
431 u32 tb_id;
432 u32 prio;
433 u8 tos;
434 u8 type;
435};
436
Ido Schimmel428b8512017-08-03 13:28:28 +0200437struct mlxsw_sp_fib6_entry {
438 struct mlxsw_sp_fib_entry common;
439 struct list_head rt6_list;
440 unsigned int nrt6;
441};
442
/* Link from a fib6 entry to one kernel fib6_info route. */
struct mlxsw_sp_rt6 {
	struct list_head list;	/* member of fib6_entry->rt6_list */
	struct fib6_info *rt;
};
447
Ido Schimmel9011b672017-05-16 19:38:25 +0200448struct mlxsw_sp_lpm_tree {
449 u8 id; /* tree ID */
450 unsigned int ref_count;
451 enum mlxsw_sp_l3proto proto;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100452 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
Ido Schimmel9011b672017-05-16 19:38:25 +0200453 struct mlxsw_sp_prefix_usage prefix_usage;
454};
455
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200456struct mlxsw_sp_fib {
457 struct rhashtable ht;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100458 struct list_head node_list;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100459 struct mlxsw_sp_vr *vr;
460 struct mlxsw_sp_lpm_tree *lpm_tree;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100461 enum mlxsw_sp_l3proto proto;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200462};
463
Ido Schimmel9011b672017-05-16 19:38:25 +0200464struct mlxsw_sp_vr {
465 u16 id; /* virtual router ID */
466 u32 tb_id; /* kernel fib table id */
467 unsigned int rif_count;
468 struct mlxsw_sp_fib *fib4;
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200469 struct mlxsw_sp_fib *fib6;
Yuval Mintz9742f862018-03-26 15:01:40 +0300470 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
Ido Schimmel9011b672017-05-16 19:38:25 +0200471};
472
Ido Schimmel9aecce12017-02-09 10:28:42 +0100473static const struct rhashtable_params mlxsw_sp_fib_ht_params;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200474
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100475static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
476 struct mlxsw_sp_vr *vr,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100477 enum mlxsw_sp_l3proto proto)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200478{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100479 struct mlxsw_sp_lpm_tree *lpm_tree;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200480 struct mlxsw_sp_fib *fib;
481 int err;
482
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100483 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200484 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
485 if (!fib)
486 return ERR_PTR(-ENOMEM);
487 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
488 if (err)
489 goto err_rhashtable_init;
Ido Schimmel9aecce12017-02-09 10:28:42 +0100490 INIT_LIST_HEAD(&fib->node_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100491 fib->proto = proto;
492 fib->vr = vr;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100493 fib->lpm_tree = lpm_tree;
494 mlxsw_sp_lpm_tree_hold(lpm_tree);
495 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
496 if (err)
497 goto err_lpm_tree_bind;
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200498 return fib;
499
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100500err_lpm_tree_bind:
501 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200502err_rhashtable_init:
503 kfree(fib);
504 return ERR_PTR(err);
505}
506
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100507static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
508 struct mlxsw_sp_fib *fib)
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200509{
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100510 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
511 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
Ido Schimmel9aecce12017-02-09 10:28:42 +0100512 WARN_ON(!list_empty(&fib->node_list));
Jiri Pirko5e9c16c2016-07-04 08:23:04 +0200513 rhashtable_destroy(&fib->ht);
514 kfree(fib);
515}
516
Jiri Pirko53342022016-07-04 08:23:08 +0200517static struct mlxsw_sp_lpm_tree *
Ido Schimmel382dbb42017-03-10 08:53:40 +0100518mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
Jiri Pirko53342022016-07-04 08:23:08 +0200519{
520 static struct mlxsw_sp_lpm_tree *lpm_tree;
521 int i;
522
Ido Schimmel9011b672017-05-16 19:38:25 +0200523 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
524 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
Ido Schimmel382dbb42017-03-10 08:53:40 +0100525 if (lpm_tree->ref_count == 0)
526 return lpm_tree;
Jiri Pirko53342022016-07-04 08:23:08 +0200527 }
528 return NULL;
529}
530
531static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
532 struct mlxsw_sp_lpm_tree *lpm_tree)
533{
534 char ralta_pl[MLXSW_REG_RALTA_LEN];
535
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200536 mlxsw_reg_ralta_pack(ralta_pl, true,
537 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
538 lpm_tree->id);
Jiri Pirko53342022016-07-04 08:23:08 +0200539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
540}
541
Ido Schimmelcc702672017-08-14 10:54:03 +0200542static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
543 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200544{
545 char ralta_pl[MLXSW_REG_RALTA_LEN];
546
Ido Schimmel1a9234e662016-09-19 08:29:26 +0200547 mlxsw_reg_ralta_pack(ralta_pl, false,
548 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
549 lpm_tree->id);
Ido Schimmelcc702672017-08-14 10:54:03 +0200550 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
Jiri Pirko53342022016-07-04 08:23:08 +0200551}
552
/* Program the tree structure into hardware via the RALST register.
 * The tree is built as a left-degenerate chain of the used prefix
 * lengths: the longest used prefix becomes the root bin, and each
 * shorter used prefix points at the previously emitted (longer) one.
 */
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	/* The last set bit visited is the largest used prefix length. */
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
576
/* Claim an unused tree slot, allocate it in hardware and program the
 * given prefix usage into it. The returned tree starts with a single
 * reference and zeroed per-prefix reference counts.
 *
 * Returns ERR_PTR(-EBUSY) when no free tree slot exists.
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
608
Ido Schimmelcc702672017-08-14 10:54:03 +0200609static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
610 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200611{
Ido Schimmelcc702672017-08-14 10:54:03 +0200612 mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
Jiri Pirko53342022016-07-04 08:23:08 +0200613}
614
/* Get a tree matching @proto and exactly @prefix_usage: reuse a live
 * tree when one matches (taking a reference), otherwise create a new
 * one. Release with mlxsw_sp_lpm_tree_put().
 */
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
Jiri Pirko53342022016-07-04 08:23:08 +0200635
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200636static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
637{
Jiri Pirko53342022016-07-04 08:23:08 +0200638 lpm_tree->ref_count++;
Jiri Pirko53342022016-07-04 08:23:08 +0200639}
640
Ido Schimmelcc702672017-08-14 10:54:03 +0200641static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
642 struct mlxsw_sp_lpm_tree *lpm_tree)
Jiri Pirko53342022016-07-04 08:23:08 +0200643{
644 if (--lpm_tree->ref_count == 0)
Ido Schimmelcc702672017-08-14 10:54:03 +0200645 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
Jiri Pirko53342022016-07-04 08:23:08 +0200646}
647
Ido Schimmeld7a60302017-06-08 08:47:43 +0200648#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
Ido Schimmel8494ab02017-03-24 08:02:47 +0100649
/* Initialise LPM tree management: size the tree array from the
 * MAX_LPM_TREES resource (tree 0 is reserved), assign hardware IDs,
 * and pre-create one empty default tree per protocol so every VR can
 * always be bound to a valid tree.
 */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	/* Default (empty prefix usage) tree for IPv4. */
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	/* Default (empty prefix usage) tree for IPv6. */
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}
698
/* Tear down LPM tree management: drop the two per-protocol default
 * tree references taken in mlxsw_sp_lpm_init() and free the array.
 */
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}
711
Ido Schimmel76610eb2017-03-10 08:53:41 +0100712static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
713{
Yuval Mintz9742f862018-03-26 15:01:40 +0300714 return !!vr->fib4 || !!vr->fib6 ||
715 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
716 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100717}
718
Jiri Pirko6b75c482016-07-04 08:23:09 +0200719static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
720{
721 struct mlxsw_sp_vr *vr;
722 int i;
723
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200724 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200725 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100726 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirko6b75c482016-07-04 08:23:09 +0200727 return vr;
728 }
729 return NULL;
730}
731
/* Bind the VR owning @fib to LPM tree @tree_id for @fib's protocol
 * (RALTB register).
 */
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
742
/* Unbind the VR owning @fib from its LPM tree by re-binding it to the
 * reserved default tree 0 (RALTB register).
 */
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
753
754static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
755{
Yotam Gigi7e50d432017-09-27 08:23:19 +0200756 /* For our purpose, squash main, default and local tables into one */
757 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200758 tb_id = RT_TABLE_MAIN;
759 return tb_id;
760}
761
762static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +0100763 u32 tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200764{
765 struct mlxsw_sp_vr *vr;
766 int i;
767
768 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Nogah Frankel9497c042016-09-20 11:16:54 +0200769
Jiri Pirkoc1a38312016-10-21 16:07:23 +0200770 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +0200771 vr = &mlxsw_sp->router->vrs[i];
Ido Schimmel76610eb2017-03-10 08:53:41 +0100772 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200773 return vr;
774 }
775 return NULL;
776}
777
Ido Schimmel76610eb2017-03-10 08:53:41 +0100778static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
779 enum mlxsw_sp_l3proto proto)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200780{
Ido Schimmel76610eb2017-03-10 08:53:41 +0100781 switch (proto) {
782 case MLXSW_SP_L3_PROTO_IPV4:
783 return vr->fib4;
784 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200785 return vr->fib6;
Ido Schimmel76610eb2017-03-10 08:53:41 +0100786 }
787 return NULL;
788}
789
/* Allocate a free virtual router and populate it: one unicast FIB and one
 * multicast routing table per L3 protocol. On success the VR is marked used
 * by having its tables assigned (see mlxsw_sp_vr_is_used()) and bound to
 * @tb_id. Returns an ERR_PTR on failure; cleanup unwinds in reverse order of
 * creation via the goto chain.
 */
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	/* Commit all tables to the VR only once every allocation succeeded,
	 * so a partially-constructed VR is never observed as "used".
	 */
	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}
841
/* Tear down all tables of @vr in reverse order of creation and NULL the
 * pointers, which marks the VR as unused again (mlxsw_sp_vr_is_used()).
 */
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}
854
David Ahernf8fa9b42017-10-18 09:56:56 -0700855static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
856 struct netlink_ext_ack *extack)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200857{
858 struct mlxsw_sp_vr *vr;
Jiri Pirko6b75c482016-07-04 08:23:09 +0200859
860 tb_id = mlxsw_sp_fix_tb_id(tb_id);
Ido Schimmel76610eb2017-03-10 08:53:41 +0100861 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
862 if (!vr)
David Ahernf8fa9b42017-10-18 09:56:56 -0700863 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200864 return vr;
865}
866
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100867static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
Jiri Pirko6b75c482016-07-04 08:23:09 +0200868{
Ido Schimmela3d9bc52017-07-18 10:10:22 +0200869 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
Yotam Gigid42b0962017-09-27 08:23:20 +0200870 list_empty(&vr->fib6->node_list) &&
Yuval Mintz9742f862018-03-26 15:01:40 +0300871 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
872 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100873 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
Jiri Pirko6b75c482016-07-04 08:23:09 +0200874}
875
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200876static bool
877mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
878 enum mlxsw_sp_l3proto proto, u8 tree_id)
879{
880 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
881
882 if (!mlxsw_sp_vr_is_used(vr))
883 return false;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100884 if (fib->lpm_tree->id == tree_id)
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200885 return true;
886 return false;
887}
888
/* Switch @fib over to @new_tree: take a reference on the new tree, bind the
 * VR to it in hardware, then drop the reference on the old tree. On bind
 * failure the FIB is rolled back to the old tree and its reference counts
 * are restored.
 */
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}
909
910static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
911 struct mlxsw_sp_fib *fib,
912 struct mlxsw_sp_lpm_tree *new_tree)
913{
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200914 enum mlxsw_sp_l3proto proto = fib->proto;
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100915 struct mlxsw_sp_lpm_tree *old_tree;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200916 u8 old_id, new_id = new_tree->id;
917 struct mlxsw_sp_vr *vr;
918 int i, err;
919
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100920 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200921 old_id = old_tree->id;
922
923 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
924 vr = &mlxsw_sp->router->vrs[i];
925 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
926 continue;
927 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
928 mlxsw_sp_vr_fib(vr, proto),
929 new_tree);
930 if (err)
931 goto err_tree_replace;
932 }
933
Ido Schimmel2b52ce02018-01-22 09:17:42 +0100934 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
935 sizeof(new_tree->prefix_ref_count));
936 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
937 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
938
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200939 return 0;
940
941err_tree_replace:
942 for (i--; i >= 0; i--) {
943 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
944 continue;
945 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
946 mlxsw_sp_vr_fib(vr, proto),
947 old_tree);
948 }
949 return err;
Ido Schimmelfc922bb2017-08-14 10:54:05 +0200950}
951
/* Allocate the virtual router array, sized by the device's MAX_VRS resource,
 * and assign each VR its index as hardware VR ID. Returns -EIO when the
 * resource is not exposed by the device, -ENOMEM on allocation failure.
 */
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	/* kcalloc zero-initializes, so all VRs start out unused. */
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}
974
Ido Schimmelac571de2016-11-14 11:26:32 +0100975static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
976
/* Free the virtual router array after draining any still-queued FIB work. */
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
990
Petr Machata6ddb7422017-09-02 23:49:19 +0200991static struct net_device *
992__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
993{
994 struct ip_tunnel *tun = netdev_priv(ol_dev);
995 struct net *net = dev_net(ol_dev);
996
997 return __dev_get_by_index(net, tun->parms.link);
998}
999
Petr Machata4cf04f32017-11-03 10:03:42 +01001000u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
Petr Machata6ddb7422017-09-02 23:49:19 +02001001{
1002 struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1003
1004 if (d)
1005 return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1006 else
1007 return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
1008}
1009
Petr Machata1012b9a2017-09-02 23:49:23 +02001010static struct mlxsw_sp_rif *
1011mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07001012 const struct mlxsw_sp_rif_params *params,
1013 struct netlink_ext_ack *extack);
Petr Machata1012b9a2017-09-02 23:49:23 +02001014
/* Create the loopback RIF that represents the overlay side of an IPIP tunnel
 * of type @ipipt on netdevice @ol_dev. The loopback configuration is supplied
 * by the tunnel-type-specific ops. Returns an ERR_PTR on failure.
 */
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}
1037
/* Allocate and initialize an IPIP entry for tunnel netdevice @ol_dev of type
 * @ipipt, including its overlay loopback RIF and a snapshot of the tunnel
 * parameters. Returns an ERR_PTR on failure.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	/* Cache the tunnel parameters; only IPv4 underlay is supported so
	 * far, an IPv6 underlay type reaching here is a driver bug.
	 */
	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}
1077
/* Free an IPIP entry, destroying its overlay loopback RIF first. */
static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}
1084
/* Tell whether @ipip_entry's tunnel terminates the given underlay address:
 * same underlay protocol, same underlay routing table and the same local
 * (source) address as @saddr.
 */
static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
1103
/* Turn @fib_entry into the decap entry of @ipip_entry: allocate an adjacency
 * KVD entry for the tunnel and cross-link the FIB entry and the IPIP entry.
 * Returns a negative errno when the KVD allocation fails.
 */
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}
1122
/* Undo mlxsw_sp_fib_entry_decap_init(): sever the FIB-entry/IPIP-entry
 * cross-links and release the tunnel's adjacency KVD entry.
 */
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}
1132
Petr Machata1cc38fb2017-09-02 23:49:26 +02001133static struct mlxsw_sp_fib_node *
1134mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1135 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001136static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1137 struct mlxsw_sp_fib_entry *fib_entry);
1138
/* Stop offloading decapsulation through @ipip_entry: detach its decap FIB
 * entry, downgrade that route to a trap entry and push the change to the
 * device.
 */
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
1150
/* Start offloading decapsulation: attach @decap_fib_entry to @ipip_entry,
 * upgrade it to an IPIP-decap entry and push the change to the device.
 * Silently stays with the trap entry when decap init fails; demotes again
 * when the device update fails.
 */
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
1164
1165/* Given an IPIP entry, find the corresponding decap route. */
1166static struct mlxsw_sp_fib_entry *
1167mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1168 struct mlxsw_sp_ipip_entry *ipip_entry)
1169{
1170 static struct mlxsw_sp_fib_node *fib_node;
1171 const struct mlxsw_sp_ipip_ops *ipip_ops;
1172 struct mlxsw_sp_fib_entry *fib_entry;
1173 unsigned char saddr_prefix_len;
1174 union mlxsw_sp_l3addr saddr;
1175 struct mlxsw_sp_fib *ul_fib;
1176 struct mlxsw_sp_vr *ul_vr;
1177 const void *saddrp;
1178 size_t saddr_len;
1179 u32 ul_tb_id;
1180 u32 saddr4;
1181
1182 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1183
1184 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1185 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1186 if (!ul_vr)
1187 return NULL;
1188
1189 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1190 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1191 ipip_entry->ol_dev);
1192
1193 switch (ipip_ops->ul_proto) {
1194 case MLXSW_SP_L3_PROTO_IPV4:
1195 saddr4 = be32_to_cpu(saddr.addr4);
1196 saddrp = &saddr4;
1197 saddr_len = 4;
1198 saddr_prefix_len = 32;
1199 break;
1200 case MLXSW_SP_L3_PROTO_IPV6:
1201 WARN_ON(1);
1202 return NULL;
1203 }
1204
1205 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1206 saddr_prefix_len);
1207 if (!fib_node || list_empty(&fib_node->entry_list))
1208 return NULL;
1209
1210 fib_entry = list_first_entry(&fib_node->entry_list,
1211 struct mlxsw_sp_fib_entry, list);
1212 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1213 return NULL;
1214
1215 return fib_entry;
1216}
1217
Petr Machata1012b9a2017-09-02 23:49:23 +02001218static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001219mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1220 enum mlxsw_sp_ipip_type ipipt,
1221 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001222{
Petr Machata1012b9a2017-09-02 23:49:23 +02001223 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02001224
1225 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1226 if (IS_ERR(ipip_entry))
1227 return ipip_entry;
1228
1229 list_add_tail(&ipip_entry->ipip_list_node,
1230 &mlxsw_sp->router->ipip_list);
1231
Petr Machata1012b9a2017-09-02 23:49:23 +02001232 return ipip_entry;
1233}
1234
/* Unregister @ipip_entry from the router's IPIP list and free it. */
static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}
1242
/* Tell whether @ipip_entry decapsulates packets arriving on @ul_dev with
 * destination @ul_dip in protocol @ul_proto: the underlay protocol and
 * source address must match, and the tunnel's bound underlay device (if any)
 * must be @ul_dev.
 */
static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}
1262
1263/* Given decap parameters, find the corresponding IPIP entry. */
1264static struct mlxsw_sp_ipip_entry *
1265mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1266 const struct net_device *ul_dev,
1267 enum mlxsw_sp_l3proto ul_proto,
1268 union mlxsw_sp_l3addr ul_dip)
1269{
1270 struct mlxsw_sp_ipip_entry *ipip_entry;
1271
1272 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1273 ipip_list_node)
1274 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1275 ul_proto, ul_dip,
1276 ipip_entry))
1277 return ipip_entry;
1278
1279 return NULL;
1280}
1281
Petr Machata6698c162017-10-16 16:26:36 +02001282static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1283 const struct net_device *dev,
1284 enum mlxsw_sp_ipip_type *p_type)
1285{
1286 struct mlxsw_sp_router *router = mlxsw_sp->router;
1287 const struct mlxsw_sp_ipip_ops *ipip_ops;
1288 enum mlxsw_sp_ipip_type ipipt;
1289
1290 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1291 ipip_ops = router->ipip_ops_arr[ipipt];
1292 if (dev->type == ipip_ops->dev_type) {
1293 if (p_type)
1294 *p_type = ipipt;
1295 return true;
1296 }
1297 }
1298 return false;
1299}
1300
/* Tell whether @dev can act as the overlay (tunnel) device of an offloaded
 * IPIP tunnel.
 */
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
1306
1307static struct mlxsw_sp_ipip_entry *
1308mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1309 const struct net_device *ol_dev)
1310{
1311 struct mlxsw_sp_ipip_entry *ipip_entry;
1312
1313 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1314 ipip_list_node)
1315 if (ipip_entry->ol_dev == ol_dev)
1316 return ipip_entry;
1317
1318 return NULL;
1319}
1320
/* Find the next IPIP entry (after @start, or from the list head when @start
 * is NULL) whose tunnel is bound to underlay device @ul_dev. Passing the
 * previous result as @start allows iterating over all matches.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}
1341
1342bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
1343 const struct net_device *dev)
1344{
1345 return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1346}
1347
/* Ask the tunnel-type ops whether decap through @ol_dev can be offloaded. */
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}
1361
/* Handle registration of a tunnel netdevice: if the tunnel is offloadable,
 * create an IPIP entry for it — unless another offloaded tunnel with the
 * same underlay source address already exists, in which case that conflicting
 * tunnel is demoted instead (two tunnels terminating the same address cannot
 * both be offloaded).
 */
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}
1388
/* Handle unregistration of a tunnel netdevice: drop its IPIP entry, if any. */
static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
1398
/* The tunnel netdevice came up: if a matching decap route exists in the
 * underlay table, promote it so decapsulation is offloaded.
 */
static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}
1410
/* Program (or disable, per @enable) the loopback RIF of an IPIP tunnel in
 * hardware via the RITR register, including the IPIP-specific loopback
 * properties (underlay VR, source address, GRE key). Only an IPv4 underlay
 * is supported; IPv6 yields -EAFNOSUPPORT.
 */
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
			struct mlxsw_sp_vr *ul_vr, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr->id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
1437
Petr Machata68c3cd92018-03-22 19:53:35 +02001438static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1439 struct net_device *ol_dev)
1440{
1441 struct mlxsw_sp_ipip_entry *ipip_entry;
1442 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1443 struct mlxsw_sp_vr *ul_vr;
1444 int err = 0;
1445
1446 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1447 if (ipip_entry) {
1448 lb_rif = ipip_entry->ol_lb;
1449 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
1450 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
1451 if (err)
1452 goto out;
1453 lb_rif->common.mtu = ol_dev->mtu;
1454 }
1455
1456out:
1457 return err;
1458}
1459
/* NETDEV_UP on a tunnel netdevice: forward to the IPIP entry, if any. */
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
1469
/* The tunnel netdevice went down: stop offloading its decap route, if any. */
static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
1477
/* NETDEV_DOWN on a tunnel netdevice: forward to the IPIP entry, if any. */
static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
1487
Petr Machata09dbf622017-11-28 13:17:14 +01001488static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1489 struct mlxsw_sp_rif *old_rif,
1490 struct mlxsw_sp_rif *new_rif);
/* Replace the IPIP entry's overlay loopback RIF with a freshly created one
 * (RIFs cannot be edited in place). When @keep_encap is set, next hops that
 * pointed at the old loopback are migrated to the new one before the old RIF
 * is destroyed, so encapsulation keeps working across the swap.
 */
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}
1516
Petr Machata09dbf622017-11-28 13:17:14 +01001517static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1518 struct mlxsw_sp_rif *rif);
1519
/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update the offload related to an
 *	IPIP entry. Decap is always updated; in addition:
 * @recreate_loopback: recreates the associated loopback RIF
 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
 *	relevant when recreate_loopback is true.
 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
 *	is only relevant when recreate_loopback is false.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		/* Keep the current loopback RIF, only refresh the nexthops
		 * that resolve through it.
		 */
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	/* Re-promote the decap route if the overlay device is up. */
	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
1562
/* Handle enslavement of an offloaded tunnel's overlay device to a VRF.
 * Recreates the loopback RIF for the new underlay table. If the move causes
 * a local address conflict with another offloaded tunnel, both the
 * conflicting tunnel and this one are demoted to slow path.
 */
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	/* A tunnel that is not offloaded is not affected. */
	if (!ipip_entry)
		return 0;

	/* For flat configuration cases, moving overlay to a different VRF might
	 * cause local address conflict, and the conflicting tunnels need to be
	 * demoted.
	 */
	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	/* No conflict: recreate the loopback RIF for the new VRF. */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}
1593
Petr Machata61481f22017-11-03 10:03:41 +01001594static int
1595mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1596 struct mlxsw_sp_ipip_entry *ipip_entry,
1597 struct net_device *ul_dev,
1598 struct netlink_ext_ack *extack)
1599{
1600 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1601 true, true, false, extack);
1602}
1603
Petr Machata4cf04f32017-11-03 10:03:42 +01001604static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001605mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1606 struct mlxsw_sp_ipip_entry *ipip_entry,
1607 struct net_device *ul_dev)
1608{
1609 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1610 false, false, true, NULL);
1611}
1612
1613static int
1614mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1615 struct mlxsw_sp_ipip_entry *ipip_entry,
1616 struct net_device *ul_dev)
1617{
1618 /* A down underlay device causes encapsulated packets to not be
1619 * forwarded, but decap still works. So refresh next hops without
1620 * touching anything else.
1621 */
1622 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1623 false, false, true, NULL);
1624}
1625
1626static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001627mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1628 struct net_device *ol_dev,
1629 struct netlink_ext_ack *extack)
1630{
1631 const struct mlxsw_sp_ipip_ops *ipip_ops;
1632 struct mlxsw_sp_ipip_entry *ipip_entry;
1633 int err;
1634
1635 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1636 if (!ipip_entry)
1637 /* A change might make a tunnel eligible for offloading, but
1638 * that is currently not implemented. What falls to slow path
1639 * stays there.
1640 */
1641 return 0;
1642
1643 /* A change might make a tunnel not eligible for offloading. */
1644 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1645 ipip_entry->ipipt)) {
1646 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1647 return 0;
1648 }
1649
1650 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1651 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1652 return err;
1653}
1654
/* Take an offloaded tunnel back to slow path: if the overlay device is up,
 * generate a down event first so decap/encap state is withdrawn, then
 * destroy the IPIP entry.
 */
void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
1664
/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in via the argument
 * `except'.
 */
1671bool
1672mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1673 enum mlxsw_sp_l3proto ul_proto,
1674 union mlxsw_sp_l3addr saddr,
1675 u32 ul_tb_id,
1676 const struct mlxsw_sp_ipip_entry *except)
1677{
1678 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1679
1680 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1681 ipip_list_node) {
1682 if (ipip_entry != except &&
1683 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1684 ul_tb_id, ipip_entry)) {
1685 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1686 return true;
1687 }
1688 }
1689
1690 return false;
1691}
1692
Petr Machata61481f22017-11-03 10:03:41 +01001693static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1694 struct net_device *ul_dev)
1695{
1696 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1697
1698 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1699 ipip_list_node) {
1700 struct net_device *ipip_ul_dev =
1701 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1702
1703 if (ipip_ul_dev == ul_dev)
1704 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1705 }
1706}
1707
/* Dispatch a netdev notifier event on an offloadable tunnel overlay device.
 * Returns 0 or a negative errno from the per-event handler.
 */
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only enslavement to an L3 master (VRF) is of interest. */
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								    ol_dev,
								    extack);
		return 0;
	case NETDEV_CHANGE:
		extack = info->extack;
		return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							       ol_dev, extack);
	case NETDEV_CHANGEMTU:
		return mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
	}
	return 0;
}
1745
/* Dispatch a netdev notifier event seen on an underlay device to the handler
 * for one specific IPIP entry resolving through that underlay.
 */
static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only enslavement to an L3 master (VRF) is relevant. */
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}
1777
/* Dispatch a netdev notifier event on an underlay device to every offloaded
 * tunnel using it. If updating any entry fails, all tunnels on that underlay
 * device are demoted to slow path and the error is propagated.
 */
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err;

	/* The previously returned entry serves as the iteration cursor;
	 * starting from NULL yields the first match.
	 */
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			return err;
		}
	}

	return 0;
}
1801
/* Hash table key for a neighbour entry: the kernel neighbour pointer. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
1805
/* Driver-side state for one kernel neighbour that is (or may be) programmed
 * to the device.
 */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node; /* member of the RIF's neigh_list */
	struct rhash_head ht_node; /* member of router->neigh_ht */
	struct mlxsw_sp_neigh_key key;
	u16 rif; /* index of the RIF the neighbour resides on */
	bool connected; /* true when the entry is programmed to the device */
	unsigned char ha[ETH_ALEN]; /* hardware address used for programming */
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index; /* valid iff counter_valid */
	bool counter_valid;
};
1820
/* rhashtable of neighbour entries, keyed by the kernel neighbour pointer. */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
1826
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001827struct mlxsw_sp_neigh_entry *
1828mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1829 struct mlxsw_sp_neigh_entry *neigh_entry)
1830{
1831 if (!neigh_entry) {
1832 if (list_empty(&rif->neigh_list))
1833 return NULL;
1834 else
1835 return list_first_entry(&rif->neigh_list,
1836 typeof(*neigh_entry),
1837 rif_list_node);
1838 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001839 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001840 return NULL;
1841 return list_next_entry(neigh_entry, rif_list_node);
1842}
1843
1844int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1845{
1846 return neigh_entry->key.n->tbl->family;
1847}
1848
/* Hardware (MAC) address recorded for the neighbour entry. */
unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}
1854
1855u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1856{
1857 struct neighbour *n;
1858
1859 n = neigh_entry->key.n;
1860 return ntohl(*((__be32 *) n->primary_key));
1861}
1862
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001863struct in6_addr *
1864mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1865{
1866 struct neighbour *n;
1867
1868 n = neigh_entry->key.n;
1869 return (struct in6_addr *) &n->primary_key;
1870}
1871
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001872int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1873 struct mlxsw_sp_neigh_entry *neigh_entry,
1874 u64 *p_counter)
1875{
1876 if (!neigh_entry->counter_valid)
1877 return -EINVAL;
1878
1879 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1880 p_counter, NULL);
1881}
1882
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001883static struct mlxsw_sp_neigh_entry *
1884mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1885 u16 rif)
1886{
1887 struct mlxsw_sp_neigh_entry *neigh_entry;
1888
1889 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1890 if (!neigh_entry)
1891 return NULL;
1892
1893 neigh_entry->key.n = n;
1894 neigh_entry->rif = rif;
1895 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1896
1897 return neigh_entry;
1898}
1899
/* Release an entry already unlinked from all lists and tables. */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
1904
/* Link the entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
1913
/* Unlink the entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
1922
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001923static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001924mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1925 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001926{
1927 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001928 const char *table_name;
1929
1930 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1931 case AF_INET:
1932 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1933 break;
1934 case AF_INET6:
1935 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1936 break;
1937 default:
1938 WARN_ON(1);
1939 return false;
1940 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001941
1942 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001943 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001944}
1945
/* Best-effort allocation of a flow counter for the neighbour. Failure is not
 * fatal: the entry simply stays without a counter (counter_valid remains
 * false).
 */
static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}
1958
1959static void
1960mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1961 struct mlxsw_sp_neigh_entry *neigh_entry)
1962{
1963 if (!neigh_entry->counter_valid)
1964 return;
1965 mlxsw_sp_flow_counter_free(mlxsw_sp,
1966 neigh_entry->counter_index);
1967 neigh_entry->counter_valid = false;
1968}
1969
/* Create the driver entry for kernel neighbour n: resolve the RIF from the
 * neighbour's netdevice, allocate the entry, insert it into the hash table,
 * optionally attach an activity counter, and link it on the RIF's neighbour
 * list. Returns the entry or an ERR_PTR.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	/* Without a RIF on the neighbour's device there is nothing to
	 * offload to.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	/* Counter allocation is best-effort and may silently not happen. */
	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
1998
/* Tear down in reverse order of mlxsw_sp_neigh_entry_create(). */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
2008
2009static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01002010mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002011{
Jiri Pirko33b13412016-11-10 12:31:04 +01002012 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002013
Jiri Pirko33b13412016-11-10 12:31:04 +01002014 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02002015 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002016 &key, mlxsw_sp_neigh_ht_params);
2017}
2018
/* Derive the polling interval for neighbour activity dumps from the kernel
 * neighbour tables' DELAY_PROBE_TIME; with IPv6 enabled, the shorter of the
 * ARP and ND values is used.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
2033
/* Handle one IPv4 entry of a RAUHTD dump record: look up the matching kernel
 * neighbour on the RIF's netdevice and mark it active via neigh_event_send().
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	/* neigh_lookup() keys on network byte order. */
	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	/* Drop the reference taken by neigh_lookup(). */
	neigh_release(n);
}
2061
#if IS_ENABLED(CONFIG_IPV6)
/* Handle one IPv6 RAUHTD dump record: look up the matching kernel neighbour
 * on the RIF's netdevice and mark it active via neigh_event_send().
 */
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	/* Drop the reference taken by neigh_lookup(). */
	neigh_release(n);
}
#else
/* IPv6 disabled: IPv6 records in the dump are silently ignored. */
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002096
Yotam Gigic723c7352016-07-05 11:27:43 +02002097static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2098 char *rauhtd_pl,
2099 int rec_index)
2100{
2101 u8 num_entries;
2102 int i;
2103
2104 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2105 rec_index);
2106 /* Hardware starts counting at 0, so add 1. */
2107 num_entries++;
2108
2109 /* Each record consists of several neighbour entries. */
2110 for (i = 0; i < num_entries; i++) {
2111 int ent_index;
2112
2113 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2114 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2115 ent_index);
2116 }
2117
2118}
2119
/* Unlike IPv4, an IPv6 dump record holds exactly one neighbour entry. */
static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}
2128
/* Dispatch one RAUHTD dump record to the per-family processor. */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}
2143
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002144static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2145{
2146 u8 num_rec, last_rec_index, num_entries;
2147
2148 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2149 last_rec_index = num_rec - 1;
2150
2151 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2152 return false;
2153 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2154 MLXSW_REG_RAUHTD_TYPE_IPV6)
2155 return true;
2156
2157 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2158 last_rec_index);
2159 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2160 return true;
2161 return false;
2162}
2163
/* Repeatedly dump active neighbour entries of the given type through the
 * RAUHTD register, feeding every record to the record processor, until a
 * non-full dump indicates there is nothing more to read.
 */
static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	return err;
}
2193
/* Poll the device for neighbour activity: dump IPv4 entries first, then
 * IPv6, reusing one register payload buffer for both.
 */
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}
2215
/* Keep neighbours that back nexthops alive in the kernel, independent of
 * whether traffic actually flows through them in hardware.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh have nexthops, make the kernel think this neigh
		 * is active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}
2230
2231static void
2232mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2233{
Ido Schimmel9011b672017-05-16 19:38:25 +02002234 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002235
Ido Schimmel9011b672017-05-16 19:38:25 +02002236 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002237 msecs_to_jiffies(interval));
2238}
2239
/* Delayed work: poll hardware for neighbour activity, refresh nexthop
 * neighbours, and re-arm itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	/* Reschedule even on failure; the next run may succeed. */
	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
2255
/* Delayed work: periodically probe nexthop neighbours that are still
 * unresolved, then re-arm itself.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem when
	 * the nexthop wouldn't get offloaded until the neighbor is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
2281
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002282static void
2283mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2284 struct mlxsw_sp_neigh_entry *neigh_entry,
2285 bool removing);
2286
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002287static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002288{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002289 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2290 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2291}
2292
/* Program (or remove) an IPv4 neighbour host entry via the RAUHT register,
 * attaching the activity counter when one is allocated.
 */
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2309
/* Program (or remove) an IPv6 neighbour host entry via the RAUHT register,
 * attaching the activity counter when one is allocated.
 */
static void
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2326
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002327bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002328{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002329 struct neighbour *n = neigh_entry->key.n;
2330
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002331 /* Packets with a link-local destination address are trapped
2332 * after LPM lookup and never reach the neighbour table, so
2333 * there is no need to program such neighbours to the device.
2334 */
2335 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2336 IPV6_ADDR_LINKLOCAL)
2337 return true;
2338 return false;
2339}
2340
2341static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002342mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2343 struct mlxsw_sp_neigh_entry *neigh_entry,
2344 bool adding)
2345{
2346 if (!adding && !neigh_entry->connected)
2347 return;
2348 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002349 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002350 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2351 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002352 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002353 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002354 return;
2355 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2356 mlxsw_sp_rauht_op(adding));
2357 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002358 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002359 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002360}
2361
/* Attach or detach an activity counter to @neigh_entry and re-program the
 * entry so the device picks up the new counter binding.
 */
void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	/* Always re-write as "add": the entry itself stays present, only
	 * its counter binding changed.
	 */
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}
2373
/* Context for a netevent that is handled asynchronously in process
 * context; allocated in the (atomic) notifier and freed by the work item.
 */
struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* only used by the neighbour event work */
};
2379
/* Process-context handler for NETEVENT_NEIGH_UPDATE: synchronize the
 * neighbour's MAC and reachability into the device and into the nexthops
 * resolved through it.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* An unusable neighbour we never tracked requires no action. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	/* Drop the entry once it is disconnected and no nexthop uses it. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	/* Pairs with the neigh_clone() taken in the netevent notifier. */
	neigh_release(n);
	kfree(net_work);
}
2426
Ido Schimmel28678f02017-11-02 17:14:10 +01002427static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2428
2429static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2430{
2431 struct mlxsw_sp_netevent_work *net_work =
2432 container_of(work, struct mlxsw_sp_netevent_work, work);
2433 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2434
2435 mlxsw_sp_mp_hash_init(mlxsw_sp);
2436 kfree(net_work);
2437}
2438
/* Netevent notifier. Runs in atomic context, so any work that needs to
 * sleep (register writes, RTNL) is deferred to a workqueue item.
 */
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;
	struct net *net;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		/* Track the kernel's probe interval so the periodic
		 * activity dump keeps the same cadence.
		 */
		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		net = ptr;

		/* Only the init namespace's multipath policy is mirrored. */
		if (!net_eq(net, &init_net))
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work)
			return NOTIFY_BAD;

		router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
		INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
		net_work->mlxsw_sp = router->mlxsw_sp;
		mlxsw_core_schedule_work(&net_work->work);
		break;
	}

	return NOTIFY_DONE;
}
2521
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002522static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2523{
Yotam Gigic723c7352016-07-05 11:27:43 +02002524 int err;
2525
Ido Schimmel9011b672017-05-16 19:38:25 +02002526 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002527 &mlxsw_sp_neigh_ht_params);
2528 if (err)
2529 return err;
2530
2531 /* Initialize the polling interval according to the default
2532 * table.
2533 */
2534 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2535
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002536 /* Create the delayed works for the activity_update */
Ido Schimmel9011b672017-05-16 19:38:25 +02002537 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002538 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002539 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002540 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002541 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2542 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002543 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002544}
2545
2546static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2547{
Ido Schimmel9011b672017-05-16 19:38:25 +02002548 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2549 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2550 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002551}
2552
Ido Schimmel9665b742017-02-08 11:16:42 +01002553static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002554 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002555{
2556 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2557
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002558 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002559 rif_list_node) {
2560 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002561 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002562 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002563}
2564
/* How a nexthop egresses: through an Ethernet neighbour or through an
 * IP-in-IP tunnel. Selects the active member of mlxsw_sp_nexthop's union.
 */
enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};
2569
/* Hash table key for nexthops: the kernel fib_nh the nexthop mirrors. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
2573
/* A single nexthop, always a member of exactly one nexthop group. */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;	/* member of the RIF's nexthop list */
	struct list_head router_list_node; /* member of the router's list */
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	/* Gateway address; sized for IPv6, also holds IPv4 gateways. */
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;		/* weight as configured in the route */
	int norm_nh_weight;	/* weight normalized across the group */
	int num_adj_entries;	/* adjacency entries owned in the group */
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry; /* TYPE_ETH */
		struct mlxsw_sp_ipip_entry *ipip_entry;	  /* TYPE_IPIP */
	};
	unsigned int counter_index;
	bool counter_valid;
};
2606
2607struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002608 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002609 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002610 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002611 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002612 u8 adj_index_valid:1,
2613 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002614 u32 adj_index;
2615 u16 ecmp_size;
2616 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002617 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002618 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002619#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002620};
2621
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002622void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2623 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002624{
2625 struct devlink *devlink;
2626
2627 devlink = priv_to_devlink(mlxsw_sp->core);
2628 if (!devlink_dpipe_table_counter_enabled(devlink,
2629 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2630 return;
2631
2632 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2633 return;
2634
2635 nh->counter_valid = true;
2636}
2637
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002638void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2639 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002640{
2641 if (!nh->counter_valid)
2642 return;
2643 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2644 nh->counter_valid = false;
2645}
2646
2647int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2648 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2649{
2650 if (!nh->counter_valid)
2651 return -EINVAL;
2652
2653 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2654 p_counter, NULL);
2655}
2656
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002657struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2658 struct mlxsw_sp_nexthop *nh)
2659{
2660 if (!nh) {
2661 if (list_empty(&router->nexthop_list))
2662 return NULL;
2663 else
2664 return list_first_entry(&router->nexthop_list,
2665 typeof(*nh), router_list_node);
2666 }
2667 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2668 return NULL;
2669 return list_next_entry(nh, router_list_node);
2670}
2671
/* Tell whether @nh is currently programmed in the device's adjacency table. */
bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded;
}
2676
2677unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2678{
2679 if (!nh->offloaded)
2680 return NULL;
2681 return nh->neigh_entry->ha;
2682}
2683
2684int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002685 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002686{
2687 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2688 u32 adj_hash_index = 0;
2689 int i;
2690
2691 if (!nh->offloaded || !nh_grp->adj_index_valid)
2692 return -EINVAL;
2693
2694 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002695 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002696
2697 for (i = 0; i < nh_grp->count; i++) {
2698 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2699
2700 if (nh_iter == nh)
2701 break;
2702 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002703 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002704 }
2705
2706 *p_adj_hash_index = adj_hash_index;
2707 return 0;
2708}
2709
/* Return the router interface (RIF) the nexthop egresses through. */
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
	return nh->rif;
}
2714
2715bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2716{
2717 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2718 int i;
2719
2720 for (i = 0; i < nh_grp->count; i++) {
2721 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2722
2723 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2724 return true;
2725 }
2726 return false;
2727}
2728
/* For IPv4 groups, priv holds the kernel fib_info the group was built from. */
static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->priv;
}
2734
/* Lookup key for the nexthop group hash table; @proto selects which union
 * member is valid.
 */
struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_l3proto proto;
	union {
		struct fib_info *fi;			/* IPv4 lookups */
		struct mlxsw_sp_fib6_entry *fib6_entry;	/* IPv6 lookups */
	};
};
2742
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002743static bool
2744mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002745 const struct in6_addr *gw, int ifindex,
2746 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002747{
2748 int i;
2749
2750 for (i = 0; i < nh_grp->count; i++) {
2751 const struct mlxsw_sp_nexthop *nh;
2752
2753 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002754 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002755 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2756 return true;
2757 }
2758
2759 return false;
2760}
2761
2762static bool
2763mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2764 const struct mlxsw_sp_fib6_entry *fib6_entry)
2765{
2766 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2767
2768 if (nh_grp->count != fib6_entry->nrt6)
2769 return false;
2770
2771 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2772 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002773 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002774
David Ahern5e670d82018-04-17 17:33:14 -07002775 ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
2776 weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
2777 gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002778 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2779 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002780 return false;
2781 }
2782
2783 return true;
2784}
2785
/* rhashtable obj_cmpfn: return zero when the lookup key matches the
 * stored group, non-zero otherwise.
 */
static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		/* IPv4 groups are identified by their fib_info pointer. */
		return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
	case MLXSW_SP_L3_PROTO_IPV6:
		/* IPv6 groups are compared member by member. */
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	default:
		WARN_ON(1);
		return 1;
	}
}
2803
/* The neighbour table's family (AF_INET/AF_INET6) identifies the group's
 * protocol.
 */
static int
mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
{
	return nh_grp->neigh_tbl->family;
}
2809
2810static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2811{
2812 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002813 const struct mlxsw_sp_nexthop *nh;
2814 struct fib_info *fi;
2815 unsigned int val;
2816 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002817
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002818 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2819 case AF_INET:
2820 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2821 return jhash(&fi, sizeof(fi), seed);
2822 case AF_INET6:
2823 val = nh_grp->count;
2824 for (i = 0; i < nh_grp->count; i++) {
2825 nh = &nh_grp->nexthops[i];
2826 val ^= nh->ifindex;
2827 }
2828 return jhash(&val, sizeof(val), seed);
2829 default:
2830 WARN_ON(1);
2831 return 0;
2832 }
2833}
2834
2835static u32
2836mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2837{
2838 unsigned int val = fib6_entry->nrt6;
2839 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2840 struct net_device *dev;
2841
2842 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern5e670d82018-04-17 17:33:14 -07002843 dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002844 val ^= dev->ifindex;
2845 }
2846
2847 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002848}
2849
/* rhashtable hashfn: hash a lookup key (mlxsw_sp_nexthop_group_cmp_arg)
 * consistently with mlxsw_sp_nexthop_group_hash_obj().
 */
static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;

	switch (cmp_arg->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_L3_PROTO_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	default:
		WARN_ON(1);
		return 0;
	}
}
2865
/* Nexthop groups are hashed/compared by content via the callbacks above,
 * so no fixed-size key is declared.
 */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
};
2872
2873static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2874 struct mlxsw_sp_nexthop_group *nh_grp)
2875{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002876 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2877 !nh_grp->gateway)
2878 return 0;
2879
Ido Schimmel9011b672017-05-16 19:38:25 +02002880 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002881 &nh_grp->ht_node,
2882 mlxsw_sp_nexthop_group_ht_params);
2883}
2884
2885static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2886 struct mlxsw_sp_nexthop_group *nh_grp)
2887{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002888 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2889 !nh_grp->gateway)
2890 return;
2891
Ido Schimmel9011b672017-05-16 19:38:25 +02002892 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002893 &nh_grp->ht_node,
2894 mlxsw_sp_nexthop_group_ht_params);
2895}
2896
2897static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002898mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2899 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002900{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002901 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2902
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002903 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002904 cmp_arg.fi = fi;
2905 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2906 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002907 mlxsw_sp_nexthop_group_ht_params);
2908}
2909
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002910static struct mlxsw_sp_nexthop_group *
2911mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2912 struct mlxsw_sp_fib6_entry *fib6_entry)
2913{
2914 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2915
2916 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2917 cmp_arg.fib6_entry = fib6_entry;
2918 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2919 &cmp_arg,
2920 mlxsw_sp_nexthop_group_ht_params);
2921}
2922
/* Individual nexthops are keyed by their mlxsw_sp_nexthop_key (fib_nh). */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
2928
/* Add a nexthop to the router's nexthop hash table. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
2935
/* Remove a nexthop from the router's nexthop hash table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
2942
/* Look up the nexthop mirroring @key's fib_nh, or NULL if none exists. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
2950
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002951static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002952 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002953 u32 adj_index, u16 ecmp_size,
2954 u32 new_adj_index,
2955 u16 new_ecmp_size)
2956{
2957 char raleu_pl[MLXSW_REG_RALEU_LEN];
2958
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002959 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002960 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2961 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002962 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002963 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2964}
2965
/* After a group moved to a new adjacency block, update every FIB that has
 * entries using the group so its routes point at the new block.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		/* Update each FIB only once.
		 * NOTE(review): the dedup only skips consecutive entries
		 * of the same FIB - presumably entries are grouped by FIB
		 * on this list; verify against the insert path.
		 */
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}
2988
Ido Schimmeleb789982017-10-22 23:11:48 +02002989static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2990 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002991{
2992 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2993 char ratr_pl[MLXSW_REG_RATR_LEN];
2994
2995 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002996 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2997 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002998 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002999 if (nh->counter_valid)
3000 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3001 else
3002 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3003
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003004 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3005}
3006
Ido Schimmeleb789982017-10-22 23:11:48 +02003007int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3008 struct mlxsw_sp_nexthop *nh)
3009{
3010 int i;
3011
3012 for (i = 0; i < nh->num_adj_entries; i++) {
3013 int err;
3014
3015 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3016 if (err)
3017 return err;
3018 }
3019
3020 return 0;
3021}
3022
/* Refresh a single IPIP adjacency entry via the tunnel type's ops. */
static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
					  u32 adj_index,
					  struct mlxsw_sp_nexthop *nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
}
3032
Ido Schimmeleb789982017-10-22 23:11:48 +02003033static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3034 u32 adj_index,
3035 struct mlxsw_sp_nexthop *nh)
3036{
3037 int i;
3038
3039 for (i = 0; i < nh->num_adj_entries; i++) {
3040 int err;
3041
3042 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3043 nh);
3044 if (err)
3045 return err;
3046 }
3047
3048 return 0;
3049}
3050
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003051static int
Petr Machata35225e42017-09-02 23:49:22 +02003052mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3053 struct mlxsw_sp_nexthop_group *nh_grp,
3054 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003055{
3056 u32 adj_index = nh_grp->adj_index; /* base */
3057 struct mlxsw_sp_nexthop *nh;
3058 int i;
3059 int err;
3060
3061 for (i = 0; i < nh_grp->count; i++) {
3062 nh = &nh_grp->nexthops[i];
3063
3064 if (!nh->should_offload) {
3065 nh->offloaded = 0;
3066 continue;
3067 }
3068
Ido Schimmela59b7e02017-01-23 11:11:42 +01003069 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02003070 switch (nh->type) {
3071 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003072 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02003073 (mlxsw_sp, adj_index, nh);
3074 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003075 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3076 err = mlxsw_sp_nexthop_ipip_update
3077 (mlxsw_sp, adj_index, nh);
3078 break;
Petr Machata35225e42017-09-02 23:49:22 +02003079 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003080 if (err)
3081 return err;
3082 nh->update = 0;
3083 nh->offloaded = 1;
3084 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003085 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003086 }
3087 return 0;
3088}
3089
Ido Schimmel1819ae32017-07-21 18:04:28 +02003090static bool
3091mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3092 const struct mlxsw_sp_fib_entry *fib_entry);
3093
/* Re-write to the device every FIB entry that uses @nh_grp.  Only the
 * first entry of each FIB node is written (see
 * mlxsw_sp_fib_node_entry_is_first()); presumably only that entry is
 * reflected in the device's table.  Returns the first error encountered.
 */
static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}
3111
3112static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003113mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3114 enum mlxsw_reg_ralue_op op, int err);
3115
3116static void
3117mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3118{
3119 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3120 struct mlxsw_sp_fib_entry *fib_entry;
3121
3122 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3123 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3124 fib_entry))
3125 continue;
3126 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3127 }
3128}
3129
Ido Schimmel425a08c2017-10-22 23:11:47 +02003130static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3131{
3132 /* Valid sizes for an adjacency group are:
3133 * 1-64, 512, 1024, 2048 and 4096.
3134 */
3135 if (*p_adj_grp_size <= 64)
3136 return;
3137 else if (*p_adj_grp_size <= 512)
3138 *p_adj_grp_size = 512;
3139 else if (*p_adj_grp_size <= 1024)
3140 *p_adj_grp_size = 1024;
3141 else if (*p_adj_grp_size <= 2048)
3142 *p_adj_grp_size = 2048;
3143 else
3144 *p_adj_grp_size = 4096;
3145}
3146
3147static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3148 unsigned int alloc_size)
3149{
3150 if (alloc_size >= 4096)
3151 *p_adj_grp_size = 4096;
3152 else if (alloc_size >= 2048)
3153 *p_adj_grp_size = 2048;
3154 else if (alloc_size >= 1024)
3155 *p_adj_grp_size = 1024;
3156 else if (alloc_size >= 512)
3157 *p_adj_grp_size = 512;
3158}
3159
/* Adjust *@p_adj_grp_size to a group size the device can actually hold:
 * round it up to a supported size, query how many KVD linear adjacency
 * entries such a request would really allocate, then round back down to
 * make maximal use of what the allocator can give.  Returns a negative
 * errno if no valid allocation size is available.
 */
static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
				     u16 *p_adj_grp_size)
{
	unsigned int alloc_size;
	int err;

	/* Round up the requested group size to the next size supported
	 * by the device and make sure the request can be satisfied.
	 */
	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      *p_adj_grp_size, &alloc_size);
	if (err)
		return err;
	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as much of them as
	 * possible.
	 */
	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);

	return 0;
}
3183
Ido Schimmel77d964e2017-08-02 09:56:05 +02003184static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003185mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3186{
3187 int i, g = 0, sum_norm_weight = 0;
3188 struct mlxsw_sp_nexthop *nh;
3189
3190 for (i = 0; i < nh_grp->count; i++) {
3191 nh = &nh_grp->nexthops[i];
3192
3193 if (!nh->should_offload)
3194 continue;
3195 if (g > 0)
3196 g = gcd(nh->nh_weight, g);
3197 else
3198 g = nh->nh_weight;
3199 }
3200
3201 for (i = 0; i < nh_grp->count; i++) {
3202 nh = &nh_grp->nexthops[i];
3203
3204 if (!nh->should_offload)
3205 continue;
3206 nh->norm_nh_weight = nh->nh_weight / g;
3207 sum_norm_weight += nh->norm_nh_weight;
3208 }
3209
3210 nh_grp->sum_norm_weight = sum_norm_weight;
3211}
3212
3213static void
3214mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3215{
3216 int total = nh_grp->sum_norm_weight;
3217 u16 ecmp_size = nh_grp->ecmp_size;
3218 int i, weight = 0, lower_bound = 0;
3219
3220 for (i = 0; i < nh_grp->count; i++) {
3221 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3222 int upper_bound;
3223
3224 if (!nh->should_offload)
3225 continue;
3226 weight += nh->norm_nh_weight;
3227 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3228 nh->num_adj_entries = upper_bound - lower_bound;
3229 lower_bound = upper_bound;
3230 }
3231}
3232
/* Bring the device state of @nh_grp in sync with its software state.
 * If the set of offloadable nexthops changed, allocate a new adjacency
 * block sized by the normalized weights, program it, repoint the FIB
 * entries, and free the old block; otherwise only rewrite the entries
 * already in place.  On any failure, fall back to trapping the group's
 * traffic to the kernel (see set_trap below).
 */
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	u16 ecmp_size, old_ecmp_size;
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	bool old_adj_index_valid;
	u32 old_adj_index;
	int i;
	int err;

	/* Groups without a gateway have no adjacency entries; just
	 * refresh their FIB entries.
	 */
	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	mlxsw_sp_nexthop_group_normalize(nh_grp);
	if (!nh_grp->sum_norm_weight)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	ecmp_size = nh_grp->sum_norm_weight;
	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
	if (err)
		/* No valid allocation size available. */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	/* Switch the group over to the new adjacency block before
	 * programming it, remembering the old block for later release.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	mlxsw_sp_nexthop_group_rebalance(nh_grp);
	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	/* Atomically repoint entries that referenced the old block, then
	 * free it.  The old block is freed even if the mass update failed.
	 */
	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   old_ecmp_size, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	/* Offload state within the group changed, so update the flags. */
	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);

	return;

set_trap:
	/* Fallback: mark the whole group as not offloaded, trap its FIB
	 * entries to the CPU and release the adjacency block, if any.
	 */
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				   nh_grp->ecmp_size, nh_grp->adj_index);
}
3346
3347static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3348 bool removing)
3349{
Petr Machata213666a2017-07-31 09:27:30 +02003350 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003351 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003352 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003353 nh->should_offload = 0;
3354 nh->update = 1;
3355}
3356
/* Propagate a neighbour state change to every nexthop using
 * @neigh_entry: update each nexthop's offload intent and refresh its
 * group so the change reaches the device.
 */
static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3370
/* Associate @nh with router interface @rif and link it into the RIF's
 * nexthop list.  No-op if the nexthop already has a RIF.
 */
static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}
3380
/* Detach @nh from its router interface, if any: unlink it from the
 * RIF's nexthop list and clear the association.
 */
static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}
3389
/* Resolve and attach the neighbour entry backing @nh.  Looks up (or
 * creates) the kernel neighbour for the nexthop's gateway address on its
 * RIF device, wraps it in a driver neigh entry, links the nexthop to it,
 * and seeds the nexthop's offload intent from the neighbour's current
 * NUD state.  Returns 0 if the nexthop needs no neighbour (no gateway)
 * or is already attached.
 */
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
				 nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the freshly created neighbour. */
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	/* Snapshot NUD state and liveness under the neighbour lock. */
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}
3444
/* Detach @nh from its neighbour entry (reverse of
 * mlxsw_sp_nexthop_neigh_init()): mark the nexthop as not offloadable,
 * unlink it, destroy the driver neigh entry if it became unused and
 * disconnected, and drop the kernel neighbour reference taken at init.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003470
Petr Machata44b0fff2017-11-03 10:03:44 +01003471static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3472{
3473 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3474
3475 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3476}
3477
/* Attach @nh to IP-in-IP entry @ipip_entry: record the association,
 * derive the initial offload intent from the underlay device's state,
 * and bind the nexthop to the tunnel's loopback RIF.  No-op if the
 * nexthop has no gateway or is already attached to a tunnel.
 */
static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	bool removing;

	if (!nh->nh_grp->gateway || nh->ipip_entry)
		return;

	nh->ipip_entry = ipip_entry;
	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
	__mlxsw_sp_nexthop_neigh_update(nh, removing);
	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}
3492
/* Detach @nh from its IP-in-IP entry, if any: mark it as not
 * offloadable and clear the tunnel association.
 */
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;

	if (!ipip_entry)
		return;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	nh->ipip_entry = NULL;
}
3504
3505static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3506 const struct fib_nh *fib_nh,
3507 enum mlxsw_sp_ipip_type *p_ipipt)
3508{
3509 struct net_device *dev = fib_nh->nh_dev;
3510
3511 return dev &&
3512 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3513 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3514}
3515
/* Tear down the type-specific state of @nh (neighbour or IP-in-IP
 * tunnel association) together with its RIF binding.
 */
static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	switch (nh->type) {
	case MLXSW_SP_NEXTHOP_TYPE_ETH:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
		break;
	}
}
3530
/* Initialize the type-specific state of IPv4 nexthop @nh.  If the
 * egress device is an offloadable IP-in-IP tunnel, bind the nexthop to
 * the tunnel; otherwise treat it as an Ethernet nexthop and, when a RIF
 * exists for the device, bind the RIF and resolve the neighbour.  A
 * missing RIF is not an error — the nexthop simply stays unresolved.
 */
static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct fib_nh *fib_nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct net_device *dev = fib_nh->nh_dev;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV4)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	mlxsw_sp_nexthop_rif_init(nh, rif);
	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_neigh_init;

	return 0;

err_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}
3568
/* IPv4 wrapper around the protocol-agnostic nexthop type teardown. */
static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}
3574
/* Initialize IPv4 nexthop @nh within @nh_grp from kernel nexthop
 * @fib_nh: record key, weight and gateway address, insert it into the
 * router's nexthop tables, allocate its counter, and — unless the
 * device is absent or the route ignores linkdown nexthops and the link
 * is down — set up its type-specific state.
 */
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	nh->nh_weight = fib_nh->nh_weight;
#else
	/* Without multipath support every nexthop weighs the same. */
	nh->nh_weight = 1;
#endif
	memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;

	/* Honor the "ignore routes with linkdown" sysctl: leave the
	 * nexthop uninitialized (not offloaded) while the link is down.
	 */
	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}
3617
/* Tear down IPv4 nexthop @nh in reverse order of
 * mlxsw_sp_nexthop4_init(): type-specific state first, then list
 * membership, counter, and finally the nexthop table entry.
 */
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
3626
/* Handle a kernel FIB nexthop notification (NH_ADD/NH_DEL) for an IPv4
 * nexthop: (re)initialize or tear down its type-specific state and
 * refresh its group.  Ignored when the router is in aborted mode, and a
 * notification for an unknown nexthop triggers a one-time warning.
 */
static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
				    unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
3652
/* Re-evaluate the offload intent of every nexthop bound to @rif and
 * refresh their groups.  Ethernet nexthops stay offloadable; IP-in-IP
 * nexthops follow the up/down state of the RIF device's underlay.
 */
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh;
	bool removing;

	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
		switch (nh->type) {
		case MLXSW_SP_NEXTHOP_TYPE_ETH:
			removing = false;
			break;
		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
			break;
		default:
			WARN_ON(1);
			continue;
		}

		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3676
/* Move every nexthop from @old_rif to @new_rif: splice the per-RIF
 * nexthop list, repoint each nexthop's RIF, and re-evaluate their
 * offload state against the new interface.
 */
static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif)
{
	struct mlxsw_sp_nexthop *nh;

	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
		nh->rif = new_rif;
	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
}
3688
/* @rif is going away: tear down the type-specific state of every
 * nexthop bound to it and refresh the affected groups.  The _safe
 * iterator is required because type_fini unlinks the nexthop from the
 * RIF's list.
 */
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
3699
Petr Machata9b014512017-09-02 23:49:20 +02003700static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3701 const struct fib_info *fi)
3702{
Petr Machata1012b9a2017-09-02 23:49:23 +02003703 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3704 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003705}
3706
/* Create a nexthop group mirroring the nexthops of fib info @fi.  The
 * group holds a reference on @fi (released on destroy), initializes one
 * driver nexthop per kernel nexthop, inserts the group into the
 * router's tables and programs it to the device.  Returns the group or
 * an ERR_PTR; on failure, already-initialized nexthops are unwound in
 * reverse order.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	/* The nexthop array is allocated inline, after the group struct. */
	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	nh_grp->priv = fi;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->neigh_tbl = &arp_tbl;

	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
	nh_grp->count = fi->fib_nhs;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}
3752
/* Destroy nexthop group @nh_grp: remove it from the router's tables,
 * finalize each nexthop, refresh the (now empty) group so its adjacency
 * block is released, drop the fib info reference taken at create time
 * and free the group.  The WARN catches a leaked adjacency index.
 */
static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
	kfree(nh_grp);
}
3770
/* Attach @fib_entry to the nexthop group of fib info @fi, creating the
 * group on first use (groups are shared between FIB entries with the
 * same fib info).
 */
static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}
3787
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003788static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3789 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003790{
3791 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3792
3793 list_del(&fib_entry->nexthop_group_node);
3794 if (!list_empty(&nh_grp->fib_list))
3795 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003796 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003797}
3798
/* An IPv4 FIB entry is offloadable only when it has no TOS, since the
 * entry here is only the default-TOS one (see the IPv4-specific check
 * in mlxsw_sp_fib_entry_should_offload()).
 */
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}
3808
/* Decide whether @fib_entry can be marked as offloaded.  IPv4 entries
 * must first pass the TOS check; then remote entries require a valid
 * adjacency index, local entries a RIF, and IP-in-IP decap entries are
 * always offloadable.  Unknown entry types are not offloaded.
 */
static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return true;
	default:
		return false;
	}
}
3834
Ido Schimmel428b8512017-08-03 13:28:28 +02003835static struct mlxsw_sp_nexthop *
3836mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3837 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3838{
3839 int i;
3840
3841 for (i = 0; i < nh_grp->count; i++) {
3842 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
David Ahern8d1c8022018-04-17 17:33:26 -07003843 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003844
David Ahern5e670d82018-04-17 17:33:14 -07003845 if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
Ido Schimmel428b8512017-08-03 13:28:28 +02003846 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
David Ahern5e670d82018-04-17 17:33:14 -07003847 &rt->fib6_nh.nh_gw))
Ido Schimmel428b8512017-08-03 13:28:28 +02003848 return nh;
3849 continue;
3850 }
3851
3852 return NULL;
3853}
3854
/* Reflect offload state of an IPv4 entry back to the kernel's FIB by
 * setting/clearing RTNH_F_OFFLOAD on the kernel nexthops.
 * Local and decap entries are always installed via the first nexthop,
 * so only that one is marked; otherwise each nexthop mirrors its own
 * 'offloaded' state.
 */
static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
	    fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
		nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
3876
/* Clear RTNH_F_OFFLOAD on the kernel nexthops of an IPv4 entry.
 * Only done when this entry is the sole user of the nexthop group
 * (list_is_singular); otherwise other FIB entries still rely on the
 * flags and they must stay set.
 */
static void
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (!list_is_singular(&nh_grp->fib_list))
		return;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
3892
/* Reflect offload state of an IPv6 entry back to the kernel's FIB.
 * Local entries are represented by the first route in the rt6 list, so
 * only that route is flagged. For other types, each route is matched to
 * its nexthop in the group and mirrors that nexthop's 'offloaded' state.
 */
static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
		else
			mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
3919
3920static void
3921mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3922{
3923 struct mlxsw_sp_fib6_entry *fib6_entry;
3924 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3925
3926 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3927 common);
3928 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
David Ahern8d1c8022018-04-17 17:33:26 -07003929 struct fib6_info *rt = mlxsw_sp_rt6->rt;
Ido Schimmel428b8512017-08-03 13:28:28 +02003930
David Ahern5e670d82018-04-17 17:33:14 -07003931 rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003932 }
3933}
3934
Ido Schimmel013b20f2017-02-08 11:16:36 +01003935static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3936{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003937 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003938 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003939 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003940 break;
3941 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003942 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3943 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003944 }
3945}
3946
3947static void
3948mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3949{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003950 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003951 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003952 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003953 break;
3954 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003955 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3956 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003957 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003958}
3959
/* Synchronize the kernel-visible offload indication after a RALUE
 * operation. A delete always clears the flags; a successful write sets
 * or clears them according to whether the entry was actually offloaded.
 * On write failure the current indication is left untouched.
 */
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
3979
/* Pack the common part of a RALUE register payload for @fib_entry:
 * protocol, operation, virtual router, prefix length and destination.
 * IPv4 destinations are passed by value (the 4-byte key reinterpreted
 * as u32); IPv6 destinations are passed as a byte array.
 */
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}
4005
/* Write a remote (gateway) FIB entry to the device via the RALUE
 * register. Returns the result of the register write.
 */
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
4034
/* Write a local (directly connected) FIB entry to the device. If the
 * entry can be offloaded, forward through the nexthop group's RIF;
 * otherwise trap matching packets to the kernel.
 */
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
4058
/* Write a trap FIB entry: matching packets are sent to the CPU
 * ("ip2me" action) instead of being forwarded in hardware.
 */
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
4069
Petr Machata4607f6d2017-09-02 23:49:25 +02004070static int
4071mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4072 struct mlxsw_sp_fib_entry *fib_entry,
4073 enum mlxsw_reg_ralue_op op)
4074{
4075 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4076 const struct mlxsw_sp_ipip_ops *ipip_ops;
4077
4078 if (WARN_ON(!ipip_entry))
4079 return -EINVAL;
4080
4081 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4082 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
4083 fib_entry->decap.tunnel_index);
4084}
4085
/* Dispatch a RALUE operation to the handler for the entry's type.
 * Returns -EINVAL for an unknown entry type.
 */
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
							fib_entry, op);
	}
	return -EINVAL;
}
4103
4104static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4105 struct mlxsw_sp_fib_entry *fib_entry,
4106 enum mlxsw_reg_ralue_op op)
4107{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004108 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01004109
Ido Schimmel013b20f2017-02-08 11:16:36 +01004110 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02004111
Ido Schimmel013b20f2017-02-08 11:16:36 +01004112 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004113}
4114
/* Write (create or update) @fib_entry in the device. */
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
4121
/* Delete @fib_entry from the device. */
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
4128
/* Classify an IPv4 route notification into a driver FIB entry type.
 * RTN_LOCAL routes whose address is the decap address of an UP IP-in-IP
 * tunnel become decap entries; other local and broadcast routes trap to
 * the CPU. Unreachable/blackhole/prohibit use the (cheaper) local
 * action, and unicast routes are remote or local depending on whether
 * they have a gateway. Returns 0 or a negative errno for unknown types.
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct net_device *dev = fen_info->fi->fib_dev;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
						 MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		/* fall through */
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}
4172
/* Allocate and initialize an IPv4 FIB entry for @fen_info under
 * @fib_node: classify its type, take a reference on (or create) the
 * matching nexthop group, and copy the route's identifying fields.
 * Returns the new entry or an ERR_PTR; on error the allocation is
 * unwound via the goto labels.
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	fib4_entry->prio = fen_info->fi->fib_priority;
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_nexthop4_group_get:
err_fib4_entry_type_set:
	kfree(fib4_entry);
	return ERR_PTR(err);
}
4209
/* Release the entry's nexthop group reference and free the entry. */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}
4216
/* Find the driver's IPv4 FIB entry matching @fen_info: locate the
 * virtual router and FIB node by table/prefix, then scan the node's
 * entry list for an entry with the same table, TOS, type and fib_info.
 * Returns NULL when no match exists.
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id == fen_info->tb_id &&
		    fib4_entry->tos == fen_info->tos &&
		    fib4_entry->type == fen_info->type &&
		    mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
		    fen_info->fi) {
			return fib4_entry;
		}
	}

	return NULL;
}
4249
/* rhashtable parameters for FIB node lookup: nodes are keyed by the
 * full mlxsw_sp_fib_key (address + prefix length), compared by memcmp.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
4256
/* Insert @fib_node into the FIB's node hash table. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
4263
/* Remove @fib_node from the FIB's node hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
4270
/* Look up a FIB node by address and prefix length. The key is zeroed
 * first because the hash table compares the whole fixed-size key with
 * memcmp and @addr_len may be shorter than the key's address field.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}
4282
4283static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004284mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004285 size_t addr_len, unsigned char prefix_len)
4286{
4287 struct mlxsw_sp_fib_node *fib_node;
4288
4289 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4290 if (!fib_node)
4291 return NULL;
4292
4293 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004294 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004295 memcpy(fib_node->key.addr, addr, addr_len);
4296 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004297
4298 return fib_node;
4299}
4300
/* Unlink and free @fib_node; it must no longer hold any entries. */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}
4307
4308static bool
4309mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4310 const struct mlxsw_sp_fib_entry *fib_entry)
4311{
4312 return list_first_entry(&fib_node->entry_list,
4313 struct mlxsw_sp_fib_entry, list) == fib_entry;
4314}
4315
/* Account @fib_node's prefix length in the LPM tree used by its
 * protocol. If the prefix length is already present in the tree, only
 * bump its reference count. Otherwise get a tree that also covers the
 * new prefix length and switch all virtual routers over to it; on
 * failure the reference taken on the new tree is dropped and the old
 * tree remains in use.
 */
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}
4347
/* Drop @fib_node's reference on its prefix length in the LPM tree.
 * When the last user of that prefix length goes away, attempt to shrink
 * to a tree without it; failures are non-fatal and simply keep the old
 * (larger) tree in use.
 */
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
4378
/* Attach @fib_node to @fib: insert it into the node hash table and
 * account its prefix length in the LPM tree. Unwinds on failure.
 */
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}
4401
/* Detach @fib_node from its FIB: release the LPM tree accounting and
 * remove it from the node hash table. Mirrors mlxsw_sp_fib_node_init().
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
4411
/* Get-or-create the FIB node for the given table/prefix/protocol.
 * Takes a reference on the virtual router; an existing node is returned
 * as-is (the VR reference then backs this additional user), otherwise a
 * new node is created and initialized. Returns the node or an ERR_PTR,
 * dropping the VR reference on failure.
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
4449
/* Release a reference on @fib_node. The node (and its virtual router
 * reference) is torn down only once it no longer holds any entries.
 */
static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
4461
/* Find the insertion point for @new4_entry in @fib_node's entry list.
 * The list is kept sorted by descending table ID, then descending TOS,
 * then descending priority. Returns the first existing entry that
 * @new4_entry should be placed before, or NULL if it belongs at a
 * position with no lower-precedence peer (e.g. end of its table span).
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id > new4_entry->tb_id)
			continue;
		if (fib4_entry->tb_id != new4_entry->tb_id)
			break;
		if (fib4_entry->tos > new4_entry->tos)
			continue;
		if (fib4_entry->prio >= new4_entry->prio ||
		    fib4_entry->tos < new4_entry->tos)
			return fib4_entry;
	}

	return NULL;
}
4482
/* Append @new4_entry after the last existing entry that shares its
 * table ID, TOS and priority, starting the scan at @fib4_entry. If the
 * scan runs off the list, list_for_each_entry_from() leaves the cursor
 * on the head container, so list_add_tail() then appends at the end of
 * the list - which is the desired behavior.
 */
static int
mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
			       struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib4_entry))
		return -EINVAL;

	fib_node = fib4_entry->common.fib_node;
	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
				 common.list) {
		if (fib4_entry->tb_id != new4_entry->tb_id ||
		    fib4_entry->tos != new4_entry->tos ||
		    fib4_entry->prio != new4_entry->prio)
			break;
	}

	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
	return 0;
}
4504
/* Insert new4_entry into its FIB node's sorted entry list.
 * - append: place it after the last entry with the same (tb_id, tos, prio).
 * - replace: require an existing matching entry and insert before it, so the
 *   old one can be removed afterwards by mlxsw_sp_fib4_entry_replace().
 * - otherwise: insert before the found position, or at the proper spot by
 *   tb_id when no candidate was found.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
	if (replace && WARN_ON(!fib4_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib4_entry) {
		list_add_tail(&new4_entry->common.list,
			      &fib4_entry->common.list);
	} else {
		struct mlxsw_sp_fib4_entry *last;

		/* No successor found: walk to the last entry whose tb_id is
		 * not smaller than ours and insert after it (or at the list
		 * head if the list is empty / all tb_ids are smaller).
		 */
		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			if (new4_entry->tb_id > last->tb_id)
				break;
			fib4_entry = last;
		}

		if (fib4_entry)
			list_add(&new4_entry->common.list,
				 &fib4_entry->common.list);
		else
			list_add(&new4_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}
4544
/* Unlink the entry from its FIB node's entry list. Counterpart of
 * mlxsw_sp_fib4_node_list_insert().
 */
static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	list_del(&fib4_entry->common.list);
}
4550
/* Program the entry into the device if it is the first (preferred) entry of
 * its FIB node; non-first entries are kept in software only. When another
 * entry was previously offloaded for this node, it is overwritten in place.
 * Returns 0 on success or the error from mlxsw_sp_fib_entry_update().
 */
static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		/* Mark the demoted entry as no longer offloaded. */
		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
4571
/* Remove the entry from the device. Only the first entry of a FIB node is
 * actually programmed; if this is it and another entry exists, that next
 * entry is promoted by overwriting the deleted one in hardware (avoiding a
 * window with no route programmed). Otherwise the prefix is simply deleted.
 */
static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		/* Reflect that the removed entry is no longer offloaded. */
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
4592
/* Link a fib4 entry to its node: first insert it into the node's sorted
 * list, then program it into the device if it became the preferred entry.
 * On device failure the list insertion is rolled back.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib4_entry *fib4_entry,
					 bool replace, bool append)
{
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
	return err;
}
4613
/* Reverse of mlxsw_sp_fib4_node_entry_link(): remove the entry from the
 * device (promoting a successor if needed) and from the node's list. IPIP
 * decap entries additionally release their decap resources.
 */
static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_node_list_remove(fib4_entry);

	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
}
4624
/* Finish a route replace: the new entry was inserted directly before the
 * entry it replaces (see mlxsw_sp_fib4_node_list_insert()), so the replaced
 * one is simply the next list element. Unlink and destroy it, then drop the
 * node reference it held. No-op unless replace is set.
 */
static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib4_entry, common.list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
4642
/* Handle an IPv4 route add/replace/append notification: look up or create
 * the FIB node for the prefix, create the fib4 entry from the notifier info,
 * link it into the node (and device), and finally dispose of a replaced
 * entry if this was a replace. Silently succeeds when the router is in
 * aborted state (hardware offload given up). Returns 0 or a negative errno;
 * errors unwind via the goto chain in reverse order of construction.
 */
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	/* Removes the replaced entry, if any; must run after the new entry
	 * is linked so its list position identifies the replaced one.
	 */
	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
4688
/* Handle an IPv4 route delete notification: find the corresponding fib4
 * entry, unlink it from its node and the device, destroy it and drop the
 * node reference. No-op in aborted state or if the entry is unknown (the
 * latter is unexpected, hence the WARN_ON).
 */
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib4_entry))
		return;
	/* Save the node pointer before the entry is destroyed. */
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004707
David Ahern8d1c8022018-04-17 17:33:26 -07004708static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004709{
4710 /* Packets with link-local destination IP arriving to the router
4711 * are trapped to the CPU, so no need to program specific routes
4712 * for them.
4713 */
David Ahern93c2fb22018-04-18 15:38:59 -07004714 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL)
Ido Schimmel428b8512017-08-03 13:28:28 +02004715 return true;
4716
4717 /* Multicast routes aren't supported, so ignore them. Neighbour
4718 * Discovery packets are specifically trapped.
4719 */
David Ahern93c2fb22018-04-18 15:38:59 -07004720 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
Ido Schimmel428b8512017-08-03 13:28:28 +02004721 return true;
4722
4723 /* Cloned routes are irrelevant in the forwarding path. */
David Ahern93c2fb22018-04-18 15:38:59 -07004724 if (rt->fib6_flags & RTF_CACHE)
Ido Schimmel428b8512017-08-03 13:28:28 +02004725 return true;
4726
4727 return false;
4728}
4729
David Ahern8d1c8022018-04-17 17:33:26 -07004730static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004731{
4732 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4733
4734 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4735 if (!mlxsw_sp_rt6)
4736 return ERR_PTR(-ENOMEM);
4737
4738 /* In case of route replace, replaced route is deleted with
4739 * no notification. Take reference to prevent accessing freed
4740 * memory.
4741 */
4742 mlxsw_sp_rt6->rt = rt;
David Ahern8d1c8022018-04-17 17:33:26 -07004743 fib6_info_hold(rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004744
4745 return mlxsw_sp_rt6;
4746}
4747
/* Drop the reference taken in mlxsw_sp_rt6_create(). Without IPv6 support
 * compiled in, fib6_info_release() is unavailable, so this compiles to a
 * no-op stub (the create path is unreachable in that configuration).
 */
#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif
4758
/* Free an mlxsw_sp_rt6 node, releasing its fib6_info reference first. */
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}
4764
/* Return the first (representative) fib6_info of a fib6 entry's rt6 list.
 * Callers rely on the list being non-empty.
 */
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}
4771
/* For an append (multipath sibling) operation, find the existing fib6 entry
 * that nrt should join: same table and identical metric. The node's entry
 * list is walked in its sorted order (by table id, then metric). Returns
 * NULL when not appending or when no matching entry exists.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct fib6_info *nrt, bool append)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	if (!append)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
		 */
		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
			continue;
		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
			break;
		if (rt->fib6_metric < nrt->fib6_metric)
			continue;
		if (rt->fib6_metric == nrt->fib6_metric)
			return fib6_entry;
		if (rt->fib6_metric > nrt->fib6_metric)
			break;
	}

	return NULL;
}
4801
4802static struct mlxsw_sp_rt6 *
4803mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
David Ahern8d1c8022018-04-17 17:33:26 -07004804 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004805{
4806 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4807
4808 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4809 if (mlxsw_sp_rt6->rt == rt)
4810 return mlxsw_sp_rt6;
4811 }
4812
4813 return NULL;
4814}
4815
/* Check whether the route's nexthop device is an offloadable IP-in-IP
 * tunnel; if so, optionally report the tunnel type via ret. Returns false
 * when the route has no nexthop device.
 */
static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh.nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
}
4823
/* Initialize the type-specific part of an IPv6 nexthop. If the nexthop
 * device is an offloadable IP-in-IP tunnel, set up an IPIP nexthop;
 * otherwise treat it as an Ethernet nexthop bound to the device's RIF and
 * initialize its neighbour. A missing RIF is not an error - the nexthop
 * simply stays unresolved. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct fib6_info *rt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->fib6_nh.nh_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV6)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}
4862
/* Tear down the type-specific (IPIP or ETH) state of an IPv6 nexthop;
 * delegates to the protocol-agnostic helper. Counterpart of
 * mlxsw_sp_nexthop6_type_init().
 */
static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}
4868
/* Initialize one nexthop of an IPv6 nexthop group from a fib6_info: copy
 * weight and gateway address, allocate a flow counter, add the nexthop to
 * the router's global nexthop list, and - when a nexthop device exists -
 * perform the type-specific initialization. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh.nh_dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = rt->fib6_nh.nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	/* A device-less nexthop is fully initialized at this point. */
	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
}
4889
/* Undo mlxsw_sp_nexthop6_init() in reverse order: type-specific teardown,
 * removal from the router's nexthop list, counter release.
 */
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
4897
/* A route counts as a gateway route if it carries RTF_GATEWAY or its
 * nexthop device is an offloadable IP-in-IP tunnel (tunnel nexthops are
 * handled like gateways by the adjacency machinery).
 */
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_flags & RTF_GATEWAY ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
4904
/* Create a nexthop group covering all routes (rt6 nodes) of a fib6 entry.
 * The group struct is allocated with a trailing array of nrt6 nexthops,
 * each initialized from the corresponding fib6_info in list order. The
 * group is then inserted into the router's group hashtable and its
 * adjacency entries refreshed. On failure, already-initialized nexthops
 * are torn down in reverse order. Returns the group or an ERR_PTR().
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	size_t alloc_size;
	int i = 0;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
#if IS_ENABLED(CONFIG_IPV6)
	nh_grp->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	/* The first route determines whether the group is a gateway group. */
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop6_init:
	/* Unwind only the nexthops initialized so far. */
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}
4955
/* Destroy an IPv6 nexthop group: remove it from the hashtable, tear down
 * every nexthop in reverse order, refresh the (now empty) adjacency state
 * and free the group. The adjacency index must be invalid by then.
 */
static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}
4972
/* Attach a fib6 entry to a nexthop group, reusing an existing group with
 * the same nexthops when one is found in the hashtable, creating a new one
 * otherwise. The entry is linked onto the group's fib_list, which acts as
 * the group's reference count. Returns 0 or a negative errno.
 */
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}
4991
/* Detach a FIB entry from its nexthop group and destroy the group once no
 * other entry references it (fib_list empty). Counterpart of
 * mlxsw_sp_nexthop6_group_get().
 */
static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
5002
/* Rebuild the nexthop group of a fib6 entry after its route set changed:
 * detach from the old group, acquire (or create) a group matching the new
 * route set, and reprogram the entry so the device uses the new adjacency
 * index. The old group is destroyed only if the entry was its last user.
 * On failure the entry is re-attached to the old group, leaving state as
 * before the call. Returns 0 or a negative errno.
 */
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	/* Detach from the old group so group lookup/creation sees only the
	 * new route set.
	 */
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	/* Roll back to the old group. */
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}
5038
/* Add a sibling route to an existing fib6 entry (multipath append): wrap
 * the fib6_info in an rt6 node, append it to the entry's route list, and
 * rebuild the entry's nexthop group to include the new nexthop. On group
 * update failure the route addition is fully rolled back.
 * Returns 0 or a negative errno.
 */
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6))
		return PTR_ERR(mlxsw_sp_rt6);

	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6++;

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	return err;
}
5066
/* Remove a sibling route from a fib6 entry and rebuild its nexthop group
 * without it. WARNs (and bails) if the route is not actually part of the
 * entry. The group update's return value is intentionally not propagated -
 * this is a void removal path.
 */
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
	if (WARN_ON(!mlxsw_sp_rt6))
		return;

	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
5083
/* Classify a fib6 entry for the device based on its representative route:
 * local/anycast -> TRAP (deliver to CPU), reject -> LOCAL (discard path,
 * see comment below), gateway/tunnel -> REMOTE (forward via adjacency),
 * anything else -> LOCAL.
 */
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct fib6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}
5103
5104static void
5105mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5106{
5107 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5108
5109 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5110 list) {
5111 fib6_entry->nrt6--;
5112 list_del(&mlxsw_sp_rt6->list);
5113 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5114 }
5115}
5116
/* Create a fib6 entry for a FIB node from an initial route: allocate the
 * entry, wrap the route in an rt6 node, derive the entry type, and acquire
 * a nexthop group for the single-route set. Errors unwind via the goto
 * chain. Returns the entry or an ERR_PTR().
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6)) {
		err = PTR_ERR(mlxsw_sp_rt6);
		goto err_rt6_create;
	}

	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);

	INIT_LIST_HEAD(&fib6_entry->rt6_list);
	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6 = 1;
	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
err_rt6_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}
5158
/* Destroy a fib6 entry: release its nexthop group reference and all of its
 * rt6 nodes, then free the entry. nrt6 must be zero after the route list
 * teardown (WARN otherwise).
 */
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}
5167
/* Find the fib6 entry that nrt should be inserted before, in the node's
 * entry list ordered by table id then metric. With replace set, an entry
 * with an identical metric is returned as the insertion point (the new
 * entry will precede it and the old one is removed later). Returns NULL
 * when nrt sorts after every existing entry.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct fib6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id)
			continue;
		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
			break;
		if (replace && rt->fib6_metric == nrt->fib6_metric)
			return fib6_entry;
		if (rt->fib6_metric > nrt->fib6_metric)
			return fib6_entry;
	}

	return NULL;
}
5189
/* Insert new6_entry into its FIB node's sorted entry list (IPv6 analogue of
 * mlxsw_sp_fib4_node_list_insert()). With replace set, a matching existing
 * entry must be found; the new entry is inserted before it so the old one
 * can be removed afterwards. Otherwise the entry is placed before the found
 * successor, or at the position dictated by table id when none was found.
 * Returns 0 or -EINVAL.
 */
static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);

	if (replace && WARN_ON(!fib6_entry))
		return -EINVAL;

	if (fib6_entry) {
		list_add_tail(&new6_entry->common.list,
			      &fib6_entry->common.list);
	} else {
		struct mlxsw_sp_fib6_entry *last;

		/* No successor: find the last entry whose table id is not
		 * smaller than ours and insert after it, or at the list head
		 * if none qualifies.
		 */
		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id)
				break;
			fib6_entry = last;
		}

		if (fib6_entry)
			list_add(&new6_entry->common.list,
				 &fib6_entry->common.list);
		else
			list_add(&new6_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}
5227
/* Unlink @fib6_entry from its FIB node's entry list; inverse of
 * mlxsw_sp_fib6_node_list_insert().
 */
static void
mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	list_del(&fib6_entry->common.list);
}
5233
5234static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005235 struct mlxsw_sp_fib6_entry *fib6_entry,
5236 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005237{
5238 int err;
5239
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005240 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005241 if (err)
5242 return err;
5243
5244 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5245 if (err)
5246 goto err_fib_node_entry_add;
5247
5248 return 0;
5249
5250err_fib_node_entry_add:
5251 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5252 return err;
5253}
5254
/* Unlink @fib6_entry from its FIB node: remove it from the device first,
 * then from the node's entry list. Inverse of
 * mlxsw_sp_fib6_node_entry_link().
 */
static void
mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
}
5262
5263static struct mlxsw_sp_fib6_entry *
5264mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
David Ahern8d1c8022018-04-17 17:33:26 -07005265 const struct fib6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02005266{
5267 struct mlxsw_sp_fib6_entry *fib6_entry;
5268 struct mlxsw_sp_fib_node *fib_node;
5269 struct mlxsw_sp_fib *fib;
5270 struct mlxsw_sp_vr *vr;
5271
David Ahern93c2fb22018-04-18 15:38:59 -07005272 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
Ido Schimmel428b8512017-08-03 13:28:28 +02005273 if (!vr)
5274 return NULL;
5275 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5276
David Ahern93c2fb22018-04-18 15:38:59 -07005277 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5278 sizeof(rt->fib6_dst.addr),
5279 rt->fib6_dst.plen);
Ido Schimmel428b8512017-08-03 13:28:28 +02005280 if (!fib_node)
5281 return NULL;
5282
5283 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
David Ahern8d1c8022018-04-17 17:33:26 -07005284 struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
Ido Schimmel428b8512017-08-03 13:28:28 +02005285
David Ahern93c2fb22018-04-18 15:38:59 -07005286 if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id &&
5287 rt->fib6_metric == iter_rt->fib6_metric &&
Ido Schimmel428b8512017-08-03 13:28:28 +02005288 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5289 return fib6_entry;
5290 }
5291
5292 return NULL;
5293}
5294
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005295static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5296 struct mlxsw_sp_fib6_entry *fib6_entry,
5297 bool replace)
5298{
5299 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5300 struct mlxsw_sp_fib6_entry *replaced;
5301
5302 if (!replace)
5303 return;
5304
5305 replaced = list_next_entry(fib6_entry, common.list);
5306
5307 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5308 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5309 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5310}
5311
/* Reflect an IPv6 route add / replace / append notification into the device.
 *
 * @rt: kernel route being added.
 * @replace: notification was FIB_EVENT_ENTRY_REPLACE.
 * @append: notification was FIB_EVENT_ENTRY_APPEND.
 *
 * Returns 0 on success (including when the event is deliberately ignored)
 * or a negative errno; the caller aborts FIB offload on error.
 */
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct fib6_info *rt, bool replace,
				    bool append)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	/* After abort, routes are no longer offloaded. */
	if (mlxsw_sp->router->aborted)
		return 0;

	/* Reject source-specific routes; presumably not offloadable by the
	 * device — NOTE(review): confirm against device capabilities.
	 */
	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* Get (or create) the FIB node for the destination prefix; takes a
	 * reference that is dropped on every error path below.
	 */
	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* Before creating a new entry, try to append route to an existing
	 * multipath entry.
	 */
	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
	if (fib6_entry) {
		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
		if (err)
			goto err_fib6_entry_nexthop_add;
		return 0;
	}

	/* We received an append event, yet did not find any route to
	 * append to.
	 */
	if (WARN_ON(append)) {
		err = -EINVAL;
		goto err_fib6_entry_append;
	}

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
	if (err)
		goto err_fib6_node_entry_link;

	/* On replace, tear down the entry that was superseded. */
	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);

	return 0;

err_fib6_node_entry_link:
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
err_fib6_entry_append:
err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
5378
/* Reflect an IPv6 route delete notification into the device. If the route
 * belongs to a multipath entry with remaining nexthops, only that nexthop
 * is removed; otherwise the whole entry is unlinked and destroyed and its
 * reference on the FIB node is dropped.
 */
static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;

	/* After abort, nothing is offloaded, so nothing to remove. */
	if (mlxsw_sp->router->aborted)
		return;

	/* Routes that were never offloaded (see the add path) are skipped. */
	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (WARN_ON(!fib6_entry))
		return;

	/* If route is part of a multipath entry, but not the last one
	 * removed, then only reduce its nexthop group.
	 */
	if (!list_is_singular(&fib6_entry->rt6_list)) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
		return;
	}

	/* Save the node pointer before the entry is destroyed. */
	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
5409
/* Program the abort trap for one protocol: allocate LPM tree @tree_id
 * (RALTA), set its structure (RALST), then for every virtual router bind
 * the tree (RALTB) and write a catch-all /0 route (RALUE) with the IP2ME
 * action, so unmatched packets go to the CPU instead of being dropped.
 * Returns 0 on success or the first register-write error.
 */
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_reg_ralxx_protocol proto,
					    u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	/* Allocate the LPM tree for this protocol. */
	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	/* Configure the tree structure; 0xff — NOTE(review): presumably the
	 * "no parent" root bin marker, confirm against the RALST spec.
	 */
	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		/* Bind the virtual router to the tree. */
		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		/* Default (prefix length 0) route trapping to the CPU. */
		mlxsw_reg_ralue_pack(ralue_pl, proto,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}
5450
Yuval Mintzeb35da02018-03-26 15:01:42 +03005451static struct mlxsw_sp_mr_table *
5452mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5453{
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005454 if (family == RTNL_FAMILY_IPMR)
Yuval Mintzeb35da02018-03-26 15:01:42 +03005455 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
Yuval Mintz64ed1b92018-03-26 15:01:44 +03005456 else
5457 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
Yuval Mintzeb35da02018-03-26 15:01:42 +03005458}
5459
/* Reflect a multicast route add/replace notification into the device:
 * resolve (or create) the virtual router for the notification's table and
 * add the MFC entry to its family's multicast routing table. Returns 0 on
 * success or a negative errno.
 */
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	/* After abort, routes are no longer offloaded. */
	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}
5477
/* Reflect a multicast route delete notification into the device and drop
 * the reference on the virtual router taken by the add path.
 */
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	/* The virtual router must exist if the route was offloaded. */
	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
5495
/* Reflect a multicast VIF add notification into the device: resolve (or
 * create) the virtual router, look up the RIF of the VIF's netdev and add
 * the VIF to the family's multicast routing table. Returns 0 on success or
 * a negative errno.
 */
static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	/* rif may be NULL here (netdev without a RIF); passed through
	 * unchecked — presumably handled by mlxsw_sp_mr_vif_add().
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}
5517
/* Reflect a multicast VIF delete notification into the device and drop the
 * reference on the virtual router taken by the VIF add path.
 */
static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	/* The virtual router must exist if the VIF was offloaded. */
	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
5536
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005537static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5538{
5539 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5540 int err;
5541
5542 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5543 MLXSW_SP_LPM_TREE_MIN);
5544 if (err)
5545 return err;
5546
Yotam Gigid42b0962017-09-27 08:23:20 +02005547 /* The multicast router code does not need an abort trap as by default,
5548 * packets that don't match any routes are trapped to the CPU.
5549 */
5550
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005551 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5552 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5553 MLXSW_SP_LPM_TREE_MIN + 1);
5554}
5555
/* Destroy every IPv4 entry of @fib_node. Each iteration drops one FIB node
 * reference; when the last entry goes, the node itself may be freed, which
 * is why the loop must stop via do_break rather than advance into the
 * (possibly freed) list head.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;

	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
				 common.list) {
		/* tmp pointing back at the head means this is the last entry. */
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}
5576
/* Destroy every IPv6 entry of @fib_node; IPv6 counterpart of
 * mlxsw_sp_fib4_node_flush(). The do_break dance prevents touching the
 * node's list head after the last mlxsw_sp_fib_node_put() may have freed
 * the node.
 */
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;

	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
				 common.list) {
		/* tmp pointing back at the head means this is the last entry. */
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
5593
Ido Schimmel9aecce12017-02-09 10:28:42 +01005594static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5595 struct mlxsw_sp_fib_node *fib_node)
5596{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005597 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005598 case MLXSW_SP_L3_PROTO_IPV4:
5599 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5600 break;
5601 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005602 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005603 break;
5604 }
5605}
5606
/* Flush all FIB nodes of one protocol's FIB in virtual router @vr. Uses the
 * same do_break pattern as the node flush helpers: flushing the last node
 * may release the FIB, so the loop must not advance past it.
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		/* tmp pointing back at the head means this is the last node. */
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
5622
/* Flush all offloaded routing state: for every in-use virtual router, flush
 * both multicast tables, then the IPv4 FIB and finally the IPv6 FIB. Used
 * on the abort path before installing the catch-all traps.
 */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i, j;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		/* Flush both protocols' multicast tables first. */
		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}
5645
/* Abort FIB offload: flush all offloaded routes, mark the router as aborted
 * (which short-circuits all later add/del handlers) and install catch-all
 * traps so traffic is routed by the kernel instead. Idempotent — a second
 * call is a no-op.
 */
static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
5659
/* Deferred FIB notification: allocated in atomic notifier context by
 * mlxsw_sp_router_fib_event() and freed by the work handler after it runs
 * under RTNL.
 */
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	/* Notifier payload copied by value; which member is valid depends on
	 * 'event' and the notification family. References are taken on
	 * embedded objects (fib_info / fib6_info / MFC cache entry / netdev)
	 * where needed until the work executes.
	 */
	union {
		struct fib6_entry_notifier_info fen6_info;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;	/* owning device instance */
	unsigned long event;		/* FIB_EVENT_* being processed */
};
5673
/* Process a deferred IPv4 FIB notification in process context under RTNL.
 * Any add/nexthop failure aborts FIB offload; references taken when the
 * work was queued (fib_info) are released per case. The work item is freed
 * at the end.
 */
static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
					fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5718
/* Process a deferred IPv6 FIB notification in process context under RTNL;
 * IPv6 counterpart of mlxsw_sp_router_fib4_event_work(). Releases the
 * fib6_info reference taken when the work was queued and frees the work
 * item at the end.
 */
static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace, append;
	int err;

	/* Protect internal structures from concurrent changes. */
	rtnl_lock();
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
					       fib_work->fen6_info.rt, replace,
					       append);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5757
/* Process a deferred multicast FIB (route or VIF) notification in process
 * context under RTNL. Add failures abort FIB offload; references taken when
 * the work was queued (MFC cache entry / netdev) are released per case and
 * the work item is freed at the end.
 */
static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	bool replace;
	int err;

	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
						replace);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
		mr_cache_put(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_work->ven_info);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		dev_put(fib_work->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
					      &fib_work->ven_info);
		dev_put(fib_work->ven_info.dev);
		break;
	case FIB_EVENT_RULE_ADD:
		/* if we get here, a rule was added that we do not support.
		 * just do the fib_abort
		 */
		mlxsw_sp_router_fib_abort(mlxsw_sp);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
5804
/* Snapshot an IPv4 FIB notification into @fib_work (runs in atomic notifier
 * context). Copies the notifier payload by value and takes a fib_info
 * reference so the objects stay alive until the work handler runs and
 * releases them.
 */
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_work->fen_info = *fen_info;
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_work->fnh_info = *fnh_info;
		/* Hold the nexthop's parent fib_info until the work runs. */
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
}
5833
/* Snapshot an IPv6 FIB notification into @fib_work (runs in atomic notifier
 * context). Copies the notifier payload by value and takes a fib6_info
 * reference, released by the work handler via mlxsw_sp_rt6_release().
 */
static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;

	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		fib_work->fen6_info = *fen6_info;
		/* Keep the route alive while the work is queued. */
		fib6_info_hold(fib_work->fen6_info.rt);
		break;
	}
}
5851
/* Snapshot a multicast FIB notification into @fib_work (runs in atomic
 * notifier context). Copies the payload and takes the matching reference —
 * MFC cache entry for route events, netdev for VIF events — released by
 * the work handler.
 */
static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
			    struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
		mr_cache_hold(fib_work->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD: /* fall through */
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
		dev_hold(fib_work->ven_info.dev);
		break;
	}
}
5870
/* Validate a FIB rule notification. Only the default rules (and l3mdev
 * rules) of each family are supported; anything else yields -EOPNOTSUPP
 * with an extack message so the caller can veto the rule. Rule deletions
 * and post-abort events are accepted as no-ops (return 0).
 */
static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	/* After abort the kernel does the routing; any rule is fine. */
	if (mlxsw_sp->router->aborted)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IP6MR:
		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");

	return err;
}
5914
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	/* Only events from the init namespace and for address families we
	 * offload (IPv4/IPv6 unicast and multicast) are of interest.
	 */
	if (!net_eq(info->net, &init_net) ||
	    (info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		/* If the rule is supported (!err), or we can veto it via
		 * extack, answer directly. Otherwise fall through and
		 * schedule work so offload can be aborted in process
		 * context.
		 */
		if (!err || info->extack)
			return notifier_from_errno(err);
		break;
	case FIB_EVENT_ENTRY_ADD:
		/* After abort, routes are no longer offloaded; veto new
		 * entries right away.
		 */
		if (router->aborted) {
			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
			return notifier_from_errno(-EINVAL);
		}
		break;
	}

	/* Defer actual processing to a workqueue; we are in atomic
	 * (RCU read-side) context here.
	 */
	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	/* Per-family handlers take the references needed to keep the
	 * notifier payload alive until the work item runs.
	 */
	switch (info->family) {
	case AF_INET:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
		mlxsw_sp_router_fib4_event(fib_work, info);
		break;
	case AF_INET6:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
		mlxsw_sp_router_fib6_event(fib_work, info);
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
		mlxsw_sp_router_fibmr_event(fib_work, info);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
5975
Petr Machata0c412922018-06-25 10:48:15 +03005976struct mlxsw_sp_rif *
Ido Schimmel4724ba562017-03-10 08:53:39 +01005977mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5978 const struct net_device *dev)
5979{
5980 int i;
5981
5982 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005983 if (mlxsw_sp->router->rifs[i] &&
5984 mlxsw_sp->router->rifs[i]->dev == dev)
5985 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005986
5987 return NULL;
5988}
5989
/* Disable the router interface @rif in hardware by clearing the enable
 * bit of its RITR register entry, using a read-modify-write sequence.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	/* Read the current RITR entry so that all other fields are
	 * preserved when writing it back.
	 */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6003
/* Synchronize software state with the disappearance of a RIF: disable it
 * in hardware first, then flush the nexthops and neighbour entries that
 * were using it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
6011
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006012static bool
6013mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6014 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006015{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006016 struct inet6_dev *inet6_dev;
6017 bool addr_list_empty = true;
6018 struct in_device *idev;
6019
Ido Schimmel4724ba562017-03-10 08:53:39 +01006020 switch (event) {
6021 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02006022 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006023 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006024 idev = __in_dev_get_rtnl(dev);
6025 if (idev && idev->ifa_list)
6026 addr_list_empty = false;
6027
6028 inet6_dev = __in6_dev_get(dev);
6029 if (addr_list_empty && inet6_dev &&
6030 !list_empty(&inet6_dev->addr_list))
6031 addr_list_empty = false;
6032
Ido Schimmel2db99372018-07-14 11:39:52 +03006033 /* macvlans do not have a RIF, but rather piggy back on the
6034 * RIF of their lower device.
6035 */
6036 if (netif_is_macvlan(dev) && addr_list_empty)
6037 return true;
6038
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006039 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006040 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006041 return true;
6042 /* It is possible we already removed the RIF ourselves
6043 * if it was assigned to a netdev that is now a bridge
6044 * or LAG slave.
6045 */
6046 return false;
6047 }
6048
6049 return false;
6050}
6051
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006052static enum mlxsw_sp_rif_type
6053mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6054 const struct net_device *dev)
6055{
6056 enum mlxsw_sp_fid_type type;
6057
Petr Machata6ddb7422017-09-02 23:49:19 +02006058 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6059 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6060
6061 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006062 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6063 type = MLXSW_SP_FID_TYPE_8021Q;
6064 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6065 type = MLXSW_SP_FID_TYPE_8021Q;
6066 else if (netif_is_bridge_master(dev))
6067 type = MLXSW_SP_FID_TYPE_8021D;
6068 else
6069 type = MLXSW_SP_FID_TYPE_RFID;
6070
6071 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6072}
6073
Ido Schimmelde5ed992017-06-04 16:53:40 +02006074static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006075{
6076 int i;
6077
Ido Schimmelde5ed992017-06-04 16:53:40 +02006078 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6079 if (!mlxsw_sp->router->rifs[i]) {
6080 *p_rif_index = i;
6081 return 0;
6082 }
6083 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006084
Ido Schimmelde5ed992017-06-04 16:53:40 +02006085 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006086}
6087
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006088static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6089 u16 vr_id,
6090 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006091{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006092 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006093
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006094 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006095 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006096 return NULL;
6097
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006098 INIT_LIST_HEAD(&rif->nexthop_list);
6099 INIT_LIST_HEAD(&rif->neigh_list);
6100 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6101 rif->mtu = l3_dev->mtu;
6102 rif->vr_id = vr_id;
6103 rif->dev = l3_dev;
6104 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006105
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006106 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006107}
6108
/* Return the RIF at @rif_index, or NULL if that slot is unused. */
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}
6114
/* Return the hardware index of @rif. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
6119
/* Return the RIF index of an IP-in-IP loopback RIF. */
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}
6124
/* Return the underlay virtual router ID of an IP-in-IP loopback RIF. */
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_vr_id;
}
6129
/* Return the ifindex of the netdevice backing @rif. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
6134
/* Return the netdevice backing @rif. */
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}
6139
/* Return the FID associated with @rif; NULL for RIF types (e.g. loopback)
 * that do not have one.
 */
struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
{
	return rif->fid;
}
6144
/* Create a router interface for the netdevice described by @params.
 * Binds the RIF to a virtual router (derived from the device's FIB
 * table), allocates a RIF index and FID, configures the hardware via
 * the type-specific ops and registers the RIF with the multicast
 * router tables. On failure, resources are released in reverse order
 * of acquisition via the goto-unwind labels.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	/* Devices without an L3 master use the main table. */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	/* Loopback RIFs have no fid_get op and no FID. */
	if (ops->fid_get) {
		fid = ops->fid_get(rif, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	/* Register the RIF with both IPv4 and IPv6 MR tables. */
	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp->router->rifs[rif_index] = rif;

	return rif;

err_mr_rif_add:
	/* Unwind only the MR tables that were successfully added. */
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
6223
/* Destroy a router interface, releasing its resources in reverse order
 * of mlxsw_sp_rif_create(): sync away users of the RIF, unlink it,
 * remove it from the MR tables, deconfigure the hardware and drop the
 * FID and virtual router references.
 */
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;
	int i;

	/* Flush nexthops and neighbours using the RIF before tearing
	 * it down.
	 */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_rif_counters_free(rif);
	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
6247
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006248static void
6249mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6250 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6251{
6252 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6253
6254 params->vid = mlxsw_sp_port_vlan->vid;
6255 params->lag = mlxsw_sp_port->lagged;
6256 if (params->lag)
6257 params->lag_id = mlxsw_sp_port->lag_id;
6258 else
6259 params->system_port = mlxsw_sp_port->local_port;
6260}
6261
/* Join a {Port, VID} to the router against @l3_dev: create (or reuse)
 * the sub-port RIF, map the port-VID to its rFID and put the VID into a
 * forwarding, non-learning state. Errors unwind in reverse order.
 */
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	/* Learning and STP are irrelevant for a router port; disable
	 * learning and force the VID into forwarding.
	 */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}
6313
/* Reverse of mlxsw_sp_port_vlan_router_join(): detach a {Port, VID}
 * from the router and restore its bridge-like state.
 */
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Only {Port, VID}s mapped to an rFID are router members. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}
6333
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006334static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6335 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006336 unsigned long event, u16 vid,
6337 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006338{
6339 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006340 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006341
Ido Schimmelce95e152017-05-26 08:37:27 +02006342 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006343 if (WARN_ON(!mlxsw_sp_port_vlan))
6344 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006345
6346 switch (event) {
6347 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006348 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006349 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006350 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006351 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006352 break;
6353 }
6354
6355 return 0;
6356}
6357
6358static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006359 unsigned long event,
6360 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006361{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006362 if (netif_is_bridge_port(port_dev) ||
6363 netif_is_lag_port(port_dev) ||
6364 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006365 return 0;
6366
David Ahernf8fa9b42017-10-18 09:56:56 -07006367 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6368 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006369}
6370
6371static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6372 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006373 unsigned long event, u16 vid,
6374 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006375{
6376 struct net_device *port_dev;
6377 struct list_head *iter;
6378 int err;
6379
6380 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6381 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006382 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6383 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006384 event, vid,
6385 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006386 if (err)
6387 return err;
6388 }
6389 }
6390
6391 return 0;
6392}
6393
/* Handle an inet address event on a LAG device. A bridged LAG is
 * managed via its bridge master and is ignored here.
 */
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	/* A bare routed LAG is modeled as {LAG, VID 1}. */
	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
					     extack);
}
6404
/* Handle an inet address event on a bridge (or VLAN-aware bridge VLAN
 * upper) device by creating or destroying the corresponding RIF.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		/* NOTE(review): rif is not NULL-checked here; this
		 * presumably relies on the caller having verified via
		 * mlxsw_sp_rif_should_config() that a RIF exists --
		 * confirm against callers.
		 */
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}
6429
6430static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006431 unsigned long event,
6432 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006433{
6434 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006435 u16 vid = vlan_dev_vlan_id(vlan_dev);
6436
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006437 if (netif_is_bridge_port(vlan_dev))
6438 return 0;
6439
Ido Schimmel4724ba562017-03-10 08:53:39 +01006440 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006441 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006442 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006443 else if (netif_is_lag_master(real_dev))
6444 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006445 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006446 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006447 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006448
6449 return 0;
6450}
6451
/* Offload a macvlan by programming its MAC into the router FDB of the
 * RIF of its lower device, so that packets for the macvlan are routed.
 */
static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif) {
		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;
}
6479
6480void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
6481 const struct net_device *macvlan_dev)
6482{
6483 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
6484 struct mlxsw_sp_rif *rif;
6485
6486 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
6487 /* If we do not have a RIF, then we already took care of
6488 * removing the macvlan's MAC during RIF deletion.
6489 */
6490 if (!rif)
6491 return;
6492 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
6493 mlxsw_sp_fid_index(rif->fid), false);
6494}
6495
6496static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
6497 unsigned long event,
6498 struct netlink_ext_ack *extack)
6499{
6500 struct mlxsw_sp *mlxsw_sp;
6501
6502 mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
6503 if (!mlxsw_sp)
6504 return 0;
6505
6506 switch (event) {
6507 case NETDEV_UP:
6508 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
6509 case NETDEV_DOWN:
6510 mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
6511 break;
6512 }
6513
6514 return 0;
6515}
6516
/* Dispatch an inet address event to the handler matching the netdevice
 * type. Devices we do not route through are silently ignored.
 */
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
	if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
	if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);

	return 0;
}
6534
/* inetaddr notifier: reflect IPv4 address removal on a netdevice into
 * RIF state. Additions are vetoed/handled earlier, at validation time.
 */
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		goto out;

	/* Not ours if no mlxsw device is found below @dev. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	/* err == 0 maps to NOTIFY_DONE. */
	return notifier_from_errno(err);
}
6560
6561int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6562 unsigned long event, void *ptr)
6563{
6564 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6565 struct net_device *dev = ivi->ivi_dev->dev;
6566 struct mlxsw_sp *mlxsw_sp;
6567 struct mlxsw_sp_rif *rif;
6568 int err = 0;
6569
Ido Schimmel4724ba562017-03-10 08:53:39 +01006570 mlxsw_sp = mlxsw_sp_lower_get(dev);
6571 if (!mlxsw_sp)
6572 goto out;
6573
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006574 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006575 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006576 goto out;
6577
David Ahernf8fa9b42017-10-18 09:56:56 -07006578 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006579out:
6580 return notifier_from_errno(err);
6581}
6582
/* Deferred-work context for IPv6 address notifications, which arrive in
 * atomic context and must be processed under RTNL.
 */
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct net_device *dev;	/* Held via dev_hold() until the work runs. */
	unsigned long event;	/* NETDEV_UP / NETDEV_DOWN. */
};
6588
/* Process a deferred IPv6 address event under RTNL, then release the
 * device reference and the work item taken in mlxsw_sp_inet6addr_event().
 */
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	/* Not ours if no mlxsw device is found below @dev. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	/* No extack: we are past the point where the event can be vetoed. */
	__mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}
6613
/* Called with rcu_read_lock() */
/* inet6addr notifier: runs in atomic context, so defer the actual RIF
 * update to a work item. Additions are handled at validation time by
 * mlxsw_sp_inet6addr_valid_event().
 */
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	/* Reference dropped by the work item. */
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}
6641
/* inet6addr validator notifier: handles NETDEV_UP synchronously so that a
 * failure to configure the RIF can veto the address addition (with extack).
 */
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
out:
	return notifier_from_errno(err);
}
6663
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006664static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006665 const char *mac, int mtu)
6666{
6667 char ritr_pl[MLXSW_REG_RITR_LEN];
6668 int err;
6669
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006670 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006671 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6672 if (err)
6673 return err;
6674
6675 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6676 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6677 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6678 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6679}
6680
/* React to a MAC address or MTU change on a netdev that backs a RIF:
 * remove the old router FDB entry, edit the RIF in hardware, install the
 * new FDB entry, and propagate the MTU change to the multicast router.
 * On failure, the already-applied steps are rolled back in reverse order.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	/* Remove the FDB entry for the old MAC before editing the RIF. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	/* Commit the new attributes to the cached RIF state. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	/* Restore the previous RIF attributes and FDB entry. */
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}
6737
Ido Schimmelb1e45522017-04-30 19:47:14 +03006738static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006739 struct net_device *l3_dev,
6740 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006741{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006742 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006743
Ido Schimmelb1e45522017-04-30 19:47:14 +03006744 /* If netdev is already associated with a RIF, then we need to
6745 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006746 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006747 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6748 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006749 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006750
David Ahernf8fa9b42017-10-18 09:56:56 -07006751 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006752}
6753
Ido Schimmelb1e45522017-04-30 19:47:14 +03006754static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6755 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006756{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006757 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006758
Ido Schimmelb1e45522017-04-30 19:47:14 +03006759 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6760 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006761 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006762 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006763}
6764
Ido Schimmelb1e45522017-04-30 19:47:14 +03006765int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6766 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006767{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006768 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6769 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006770
Ido Schimmelc5516182018-07-14 11:39:51 +03006771 /* We do not create a RIF for a macvlan, but only use it to
6772 * direct more MAC addresses to the router.
6773 */
6774 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
Ido Schimmelb1e45522017-04-30 19:47:14 +03006775 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006776
Ido Schimmelb1e45522017-04-30 19:47:14 +03006777 switch (event) {
6778 case NETDEV_PRECHANGEUPPER:
6779 return 0;
6780 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006781 if (info->linking) {
6782 struct netlink_ext_ack *extack;
6783
6784 extack = netdev_notifier_info_to_extack(&info->info);
6785 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6786 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006787 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006788 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006789 break;
6790 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006791
Ido Schimmelb1e45522017-04-30 19:47:14 +03006792 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006793}
6794
Ido Schimmel2db99372018-07-14 11:39:52 +03006795static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev, void *data)
6796{
6797 struct mlxsw_sp_rif *rif = data;
6798
6799 if (!netif_is_macvlan(dev))
6800 return 0;
6801
6802 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
6803 mlxsw_sp_fid_index(rif->fid), false);
6804}
6805
6806static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
6807{
6808 if (!netif_is_macvlan_port(rif->dev))
6809 return 0;
6810
6811 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
6812 return netdev_walk_all_upper_dev_rcu(rif->dev,
6813 __mlxsw_sp_rif_macvlan_flush, rif);
6814}
6815
/* Downcast a generic RIF to its sub-port representation. Only valid for
 * RIFs of type MLXSW_SP_RIF_TYPE_SUBPORT.
 */
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}
6821
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006822static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6823 const struct mlxsw_sp_rif_params *params)
6824{
6825 struct mlxsw_sp_rif_subport *rif_subport;
6826
6827 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6828 rif_subport->vid = params->vid;
6829 rif_subport->lag = params->lag;
6830 if (params->lag)
6831 rif_subport->lag_id = params->lag_id;
6832 else
6833 rif_subport->system_port = params->system_port;
6834}
6835
/* Write (enable) or invalidate (disable) the RITR entry of a sub-port RIF.
 * The underlying L2 interface is identified either by LAG id or by system
 * port, together with the VID.
 */
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	/* ritr_pack initializes the payload; field setters must follow it. */
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6853
/* Configure a sub-port RIF in hardware: create the RITR entry, install the
 * router FDB entry for the netdev's MAC, and link the FID back to the RIF.
 * Unwinds in reverse order on failure.
 */
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}
6874
/* Tear down a sub-port RIF: exact reverse of mlxsw_sp_rif_subport_configure(),
 * plus flushing FDB entries of any macvlan uppers.
 */
static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
}
6885
/* fid_get op for sub-port RIFs: a router FID (rFID) keyed by the RIF index.
 * @extack is unused here; kept for the common fid_get signature.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
6892
/* Ops for RIFs on top of a physical port / LAG (optionally with a VLAN). */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type = MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size = sizeof(struct mlxsw_sp_rif_subport),
	.setup = mlxsw_sp_rif_subport_setup,
	.configure = mlxsw_sp_rif_subport_configure,
	.deconfigure = mlxsw_sp_rif_subport_deconfigure,
	.fid_get = mlxsw_sp_rif_subport_fid_get,
};
6901
/* Write (enable) or invalidate (disable) the RITR entry of a VLAN- or
 * FID-based RIF; @type selects which, @vid_fid carries the VID or FID index.
 */
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
6916
/* Local port number representing the router itself: one past the last
 * front-panel port. Used as a flood-table member so flooded traffic also
 * reaches the router.
 */
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
6921
/* Configure a VLAN RIF: create the RITR entry, add the router port to the
 * FID's MC and BC flood tables, install the router FDB entry and link the
 * FID to the RIF. Error path unwinds each step in reverse order.
 */
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}
6960
/* Tear down a VLAN RIF: exact reverse of mlxsw_sp_rif_vlan_configure(),
 * plus flushing FDB entries of any macvlan uppers.
 */
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}
6977
/* fid_get op for VLAN RIFs: resolve the 802.1Q FID from the VLAN ID of the
 * netdev - either the VLAN device's VID or the bridge's PVID. Returns an
 * ERR_PTR with extack set when no usable PVID exists.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		/* A PVID of 0 means no PVID is configured on the bridge. */
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
6997
/* fdb_del op for VLAN RIFs: tell the bridge to delete its FDB entry for
 * @mac in the RIF's VLAN, after the entry was removed from the device.
 */
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct switchdev_notifier_fdb_info info;
	struct net_device *br_dev;
	struct net_device *dev;

	/* For a VLAN upper of a bridge, the bridge is the real device. */
	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
}
7014
/* Ops for RIFs on top of a VLAN-aware bridge (802.1Q FID); no .setup -
 * there is no extra per-type state to initialize.
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type = MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_vlan_configure,
	.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
	.fid_get = mlxsw_sp_rif_vlan_fid_get,
	.fdb_del = mlxsw_sp_rif_vlan_fdb_del,
};
7023
/* Configure a FID RIF (VLAN-unaware bridge): mirrors
 * mlxsw_sp_rif_vlan_configure() but keys the RITR entry by FID index.
 * Error path unwinds each step in reverse order.
 */
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}
7063
/* Tear down a FID RIF: exact reverse of mlxsw_sp_rif_fid_configure(),
 * plus flushing FDB entries of any macvlan uppers.
 */
static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}
7080
/* fid_get op for FID RIFs: an 802.1D FID keyed by the bridge's ifindex.
 * @extack is unused here; kept for the common fid_get signature.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
7087
/* fdb_del op for FID RIFs: tell the VLAN-unaware bridge (VID 0) to delete
 * its FDB entry for @mac, after the entry was removed from the device.
 */
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info;
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info);
}
7101
/* Ops for RIFs on top of a VLAN-unaware bridge (802.1D FID). */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type = MLXSW_SP_RIF_TYPE_FID,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_fid_configure,
	.deconfigure = mlxsw_sp_rif_fid_deconfigure,
	.fid_get = mlxsw_sp_rif_fid_fid_get,
	.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
7110
/* Downcast a generic RIF to its IP-in-IP loopback representation. Only
 * valid for RIFs of type MLXSW_SP_RIF_TYPE_IPIP_LB.
 */
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}
7116
7117static void
7118mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7119 const struct mlxsw_sp_rif_params *params)
7120{
7121 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7122 struct mlxsw_sp_rif_ipip_lb *rif_lb;
7123
7124 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7125 common);
7126 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7127 rif_lb->lb_config = params_lb->lb_config;
7128}
7129
/* Configure an IP-in-IP loopback RIF: take a reference on the underlay VR
 * and program the loopback in hardware. On failure the VR reference is
 * released; on success it is held until deconfigure.
 */
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	/* Remember the underlay VR and pin it via its RIF count. */
	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}
7155
/* Tear down an IP-in-IP loopback RIF and drop the reference taken on the
 * underlay VR by mlxsw_sp_rif_ipip_lb_configure().
 */
static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
7168
/* Ops for IP-in-IP loopback RIFs; no FID is associated, hence no .fid_get. */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type = MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup = mlxsw_sp_rif_ipip_lb_setup,
	.configure = mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
};
7176
/* Per-type RIF ops dispatch table, indexed by enum mlxsw_sp_rif_type. */
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
};
7183
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007184static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
7185{
7186 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
7187
7188 mlxsw_sp->router->rifs = kcalloc(max_rifs,
7189 sizeof(struct mlxsw_sp_rif *),
7190 GFP_KERNEL);
7191 if (!mlxsw_sp->router->rifs)
7192 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02007193
7194 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
7195
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007196 return 0;
7197}
7198
7199static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
7200{
7201 int i;
7202
7203 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7204 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
7205
7206 kfree(mlxsw_sp->router->rifs);
7207}
7208
/* Program the TIGCR register, which holds global tunneling configuration. */
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}
7217
/* Initialize IP-in-IP tunnel support: ops table, tunnel list and global
 * tunneling configuration.
 */
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
7224
/* All tunnel entries should have been removed by now; warn otherwise. */
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}
7229
/* FIB notifier dump-flush callback: drain deferred FIB work and clear the
 * hardware tables before the core requests a fresh FIB dump.
 */
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
7242
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Enable hashing over an outer-header type in the RECR2 payload. */
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

/* Enable a specific outer-header field in the RECR2 hash computation. */
static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

/* Set up the IPv4 ECMP hash fields, mirroring the kernel's multipath hash
 * policy: L3 (SIP/DIP) always; L4 ports and protocol only when the policy
 * is not L3-only.
 */
static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

/* Set up the IPv6 ECMP hash fields, mirroring the kernel's IPv6 multipath
 * hash policy: flow label for L3-only, L4 ports otherwise.
 */
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(&init_net);

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

/* Program the ECMP hash (RECR2) with a random seed and the IPv4/IPv6 field
 * selections above.
 */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
/* Without multipath routing there is nothing to configure. */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif
7312
/* Program the DSCP-to-priority map (RDPM) so hardware classification
 * matches the kernel's ToS-based rt_tos2priority() mapping.
 */
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}
7330
Ido Schimmel4724ba562017-03-10 08:53:39 +01007331static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7332{
7333 char rgcr_pl[MLXSW_REG_RGCR_LEN];
7334 u64 max_rifs;
7335 int err;
7336
7337 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
7338 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007339 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007340
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007341 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007342 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
Yuval Mintz48276a22018-01-14 12:33:14 +01007343 mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007344 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
7345 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007346 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007347 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007348}
7349
/* Disable IPv4 and IPv6 routing in hardware (RGCR); the write status is
 * intentionally ignored on this teardown path.
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
7357
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007358int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7359{
Ido Schimmel9011b672017-05-16 19:38:25 +02007360 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007361 int err;
7362
Ido Schimmel9011b672017-05-16 19:38:25 +02007363 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
7364 if (!router)
7365 return -ENOMEM;
7366 mlxsw_sp->router = router;
7367 router->mlxsw_sp = mlxsw_sp;
7368
7369 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007370 err = __mlxsw_sp_router_init(mlxsw_sp);
7371 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02007372 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007373
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007374 err = mlxsw_sp_rifs_init(mlxsw_sp);
7375 if (err)
7376 goto err_rifs_init;
7377
Petr Machata38ebc0f2017-09-02 23:49:17 +02007378 err = mlxsw_sp_ipips_init(mlxsw_sp);
7379 if (err)
7380 goto err_ipips_init;
7381
Ido Schimmel9011b672017-05-16 19:38:25 +02007382 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007383 &mlxsw_sp_nexthop_ht_params);
7384 if (err)
7385 goto err_nexthop_ht_init;
7386
Ido Schimmel9011b672017-05-16 19:38:25 +02007387 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007388 &mlxsw_sp_nexthop_group_ht_params);
7389 if (err)
7390 goto err_nexthop_group_ht_init;
7391
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02007392 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007393 err = mlxsw_sp_lpm_init(mlxsw_sp);
7394 if (err)
7395 goto err_lpm_init;
7396
Yotam Gigid42b0962017-09-27 08:23:20 +02007397 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
7398 if (err)
7399 goto err_mr_init;
7400
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007401 err = mlxsw_sp_vrs_init(mlxsw_sp);
7402 if (err)
7403 goto err_vrs_init;
7404
Ido Schimmel8c9583a2016-10-27 15:12:57 +02007405 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007406 if (err)
7407 goto err_neigh_init;
7408
Ido Schimmel48fac882017-11-02 17:14:06 +01007409 mlxsw_sp->router->netevent_nb.notifier_call =
7410 mlxsw_sp_router_netevent_event;
7411 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7412 if (err)
7413 goto err_register_netevent_notifier;
7414
Ido Schimmelaf658b62017-11-02 17:14:09 +01007415 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
7416 if (err)
7417 goto err_mp_hash_init;
7418
Yuval Mintz48276a22018-01-14 12:33:14 +01007419 err = mlxsw_sp_dscp_init(mlxsw_sp);
7420 if (err)
7421 goto err_dscp_init;
7422
Ido Schimmel7e39d112017-05-16 19:38:28 +02007423 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
7424 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007425 mlxsw_sp_router_fib_dump_flush);
7426 if (err)
7427 goto err_register_fib_notifier;
7428
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007429 return 0;
7430
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007431err_register_fib_notifier:
Yuval Mintz48276a22018-01-14 12:33:14 +01007432err_dscp_init:
Ido Schimmelaf658b62017-11-02 17:14:09 +01007433err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01007434 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7435err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007436 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007437err_neigh_init:
7438 mlxsw_sp_vrs_fini(mlxsw_sp);
7439err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02007440 mlxsw_sp_mr_fini(mlxsw_sp);
7441err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01007442 mlxsw_sp_lpm_fini(mlxsw_sp);
7443err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007444 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007445err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007446 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007447err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02007448 mlxsw_sp_ipips_fini(mlxsw_sp);
7449err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007450 mlxsw_sp_rifs_fini(mlxsw_sp);
7451err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007452 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007453err_router_init:
7454 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007455 return err;
7456}
7457
/* Tear down the router: unregister notifiers first so no new events
 * arrive, then release all sub-modules in the exact reverse order of
 * mlxsw_sp_router_init(), and finally free the router object.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}