/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

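/* Per-ASIC router state. All routing objects - router interfaces (RIFs),
 * virtual routers (VRs), LPM trees, nexthops, neighbour entries and IPIP
 * tunnel entries - are reached through this structure.
 */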
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

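/* Operations implemented by each RIF type (e.g. the subport and IPIP
 * loopback RIFs above): how large the type-specific RIF structure is and
 * how to set up, configure and tear down a RIF of that type.
 */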
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

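/* RIF counter helpers. A RIF can have an ingress and an egress packet
 * counter; the helpers below pick the per-direction field, allocate a
 * counter from the RIF counter sub-pool and bind it via the RITR register.
 */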
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

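/* One bin per possible prefix length, 0 through 128, hence the IPv6 address
 * width in bits plus one.
 */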
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

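/* A FIB holds the routes of a single protocol (IPv4 or IPv6) within a single
 * virtual router, together with the prefix lengths in use and the LPM tree
 * currently bound to it.
 */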
struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

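/* LPM trees are a limited hardware resource shared by all virtual routers:
 * each tree describes a set of prefix lengths (programmed through RALTA and
 * RALST) and is reference-counted, so routers whose FIBs use the same prefix
 * lengths can share one tree.
 */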
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

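/* A virtual router mirrors one kernel FIB table. It is considered in use as
 * long as it still has an IPv4 FIB, an IPv6 FIB or an IPv4 multicast table.
 */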
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
					const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

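/* IP-in-IP tunnel offload. Each offloaded tunnel netdevice (the overlay
 * device, "ol_dev") is tracked by an ipip_entry on router->ipip_list and is
 * backed by a loopback RIF created in the tunnel's underlay table.
 */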
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static __be32
mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.saddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.daddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

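/* Turning a FIB entry into a tunnel's decap entry: allocate a tunnel index
 * in the KVD linear area and cross-link the FIB entry with the IPIP entry,
 * so that packets hitting the route are decapsulated instead of trapped.
 */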
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* The configuration where several tunnels have the same local address
	 * in the same underlay table needs special treatment in the HW. That is
	 * currently not implemented in the driver.
	 */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node) {
		ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry))
			return ERR_PTR(-EEXIST);
	}

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp,
			     const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

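/* Reflect netdevice events on the overlay device into the IPIP state:
 * create the ipip_entry on register, destroy it on unregister, and promote
 * or demote the decap route as the device goes up, down or changes VRF.
 */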
static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_ipip_type ipipt;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV4) ||
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV6)) {
		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
							ol_dev);
		if (IS_ERR(ipip_entry))
			return PTR_ERR(ipip_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry && ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		return 0;

	/* When a tunneling device is moved to a different VRF, we need to
	 * update the backing loopback. Since RIFs can't be edited, we need to
	 * destroy and recreate it. That might create a window of opportunity
	 * where RALUE and RATR registers end up referencing a RIF that's
	 * already gone. RATRs are handled by the RIF destroy, and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt,
						 ol_dev);
	if (IS_ERR(lb_rif))
		return PTR_ERR(lb_rif);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	ipip_entry->ol_lb = lb_rif;

	if (ol_dev->flags & IFF_UP) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *ol_dev,
				  unsigned long event,
				  struct netdev_notifier_changeupper_info *info)
{
	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_up_event(mlxsw_sp, ol_dev);
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev))
			return mlxsw_sp_netdevice_ipip_vrf_event(mlxsw_sp,
								 ol_dev);
		return 0;
	}
	return 0;
}

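/* Neighbour (ARP/ND) offload. Kernel neighbours used by the router are
 * mirrored by mlxsw_sp_neigh_entry objects, kept in a hash table keyed by
 * the kernel's struct neighbour and also listed per RIF.
 */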
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1586 break;
1587 default:
1588 WARN_ON(1);
1589 return false;
1590 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001591
1592 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001593 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001594}
1595
1596static void
1597mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1598 struct mlxsw_sp_neigh_entry *neigh_entry)
1599{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001600 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001601 return;
1602
1603 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1604 return;
1605
1606 neigh_entry->counter_valid = true;
1607}
1608
1609static void
1610mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1611 struct mlxsw_sp_neigh_entry *neigh_entry)
1612{
1613 if (!neigh_entry->counter_valid)
1614 return;
1615 mlxsw_sp_flow_counter_free(mlxsw_sp,
1616 neigh_entry->counter_index);
1617 neigh_entry->counter_valid = false;
1618}
1619
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001620static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001621mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001622{
1623 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001624 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001625 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001626
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001627 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1628 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001629 return ERR_PTR(-EINVAL);
1630
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001631 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001632 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001633 return ERR_PTR(-ENOMEM);
1634
1635 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1636 if (err)
1637 goto err_neigh_entry_insert;
1638
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001639 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001640 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001641
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001642 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001643
1644err_neigh_entry_insert:
1645 mlxsw_sp_neigh_entry_free(neigh_entry);
1646 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001647}
1648
1649static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001650mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1651 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001652{
Ido Schimmel9665b742017-02-08 11:16:42 +01001653 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001654 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001655 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1656 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001657}
1658
1659static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001660mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001661{
Jiri Pirko33b13412016-11-10 12:31:04 +01001662 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001663
Jiri Pirko33b13412016-11-10 12:31:04 +01001664 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001665 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001666 &key, mlxsw_sp_neigh_ht_params);
1667}
1668
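/* The neighbour activity polling interval follows the kernel's
 * DELAY_PROBE_TIME: the minimum of the ARP and (when IPv6 is enabled) ND
 * table defaults, converted to milliseconds.
 */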
Yotam Gigic723c7352016-07-05 11:27:43 +02001669static void
1670mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1671{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001672 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001673
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001674#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001675 interval = min_t(unsigned long,
1676 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1677 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001678#else
1679 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1680#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001681 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001682}
1683
1684static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1685 char *rauhtd_pl,
1686 int ent_index)
1687{
1688 struct net_device *dev;
1689 struct neighbour *n;
1690 __be32 dipn;
1691 u32 dip;
1692 u16 rif;
1693
1694 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1695
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001696 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001697 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1698 return;
1699 }
1700
1701 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001702 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001703 n = neigh_lookup(&arp_tbl, &dipn, dev);
1704 if (!n) {
1705 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1706 &dip);
1707 return;
1708 }
1709
1710 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1711 neigh_event_send(n, NULL);
1712 neigh_release(n);
1713}
1714
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001715#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001716static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1717 char *rauhtd_pl,
1718 int rec_index)
1719{
1720 struct net_device *dev;
1721 struct neighbour *n;
1722 struct in6_addr dip;
1723 u16 rif;
1724
1725 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1726 (char *) &dip);
1727
1728 if (!mlxsw_sp->router->rifs[rif]) {
1729 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1730 return;
1731 }
1732
1733 dev = mlxsw_sp->router->rifs[rif]->dev;
1734 n = neigh_lookup(&nd_tbl, &dip, dev);
1735 if (!n) {
1736 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1737 &dip);
1738 return;
1739 }
1740
1741 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1742 neigh_event_send(n, NULL);
1743 neigh_release(n);
1744}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001745#else
1746static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1747 char *rauhtd_pl,
1748 int rec_index)
1749{
1750}
1751#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001752
Yotam Gigic723c7352016-07-05 11:27:43 +02001753static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1754 char *rauhtd_pl,
1755 int rec_index)
1756{
1757 u8 num_entries;
1758 int i;
1759
1760 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1761 rec_index);
1762 /* Hardware starts counting at 0, so add 1. */
1763 num_entries++;
1764
1765 /* Each record consists of several neighbour entries. */
1766 for (i = 0; i < num_entries; i++) {
1767 int ent_index;
1768
1769 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1770 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1771 ent_index);
1772 }
1773
1774}
1775
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001776static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1777 char *rauhtd_pl,
1778 int rec_index)
1779{
1780 /* One record contains one entry. */
1781 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1782 rec_index);
1783}
1784
Yotam Gigic723c7352016-07-05 11:27:43 +02001785static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1786 char *rauhtd_pl, int rec_index)
1787{
1788 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1789 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1790 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1791 rec_index);
1792 break;
1793 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001794 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1795 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001796 break;
1797 }
1798}
1799
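/* A RAUHTD response is considered full - meaning another query is needed -
 * only if all record slots are used and the last record cannot hold any more
 * entries: an IPv6 record carries a single entry, while an IPv4 record is
 * only full once it holds MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 */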
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001800static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1801{
1802 u8 num_rec, last_rec_index, num_entries;
1803
1804 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1805 last_rec_index = num_rec - 1;
1806
1807 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1808 return false;
1809 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1810 MLXSW_REG_RAUHTD_TYPE_IPV6)
1811 return true;
1812
1813 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1814 last_rec_index);
1815 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1816 return true;
1817 return false;
1818}
1819
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001820static int
1821__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1822 char *rauhtd_pl,
1823 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001824{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001825 int i, num_rec;
1826 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001827
1828 /* Make sure the neighbour's netdev isn't removed in the
1829 * process.
1830 */
1831 rtnl_lock();
1832 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001833 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001834 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1835 rauhtd_pl);
1836 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001837 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001838 break;
1839 }
1840 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1841 for (i = 0; i < num_rec; i++)
1842 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1843 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001844 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001845 rtnl_unlock();
1846
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001847 return err;
1848}
1849
1850static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1851{
1852 enum mlxsw_reg_rauhtd_type type;
1853 char *rauhtd_pl;
1854 int err;
1855
1856 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1857 if (!rauhtd_pl)
1858 return -ENOMEM;
1859
1860 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1861 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1862 if (err)
1863 goto out;
1864
1865 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1866 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1867out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001868 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001869 return err;
1870}
1871
1872static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1873{
1874 struct mlxsw_sp_neigh_entry *neigh_entry;
1875
 1876	/* Take the RTNL mutex here to prevent the lists from changing */
1877 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001878 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001879 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001880		/* If this neigh has nexthops, make the kernel think this neigh
1881 * is active regardless of the traffic.
1882 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001883 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001884 rtnl_unlock();
1885}
1886
1887static void
1888mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1889{
Ido Schimmel9011b672017-05-16 19:38:25 +02001890 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001891
Ido Schimmel9011b672017-05-16 19:38:25 +02001892 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001893 msecs_to_jiffies(interval));
1894}
1895
1896static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1897{
Ido Schimmel9011b672017-05-16 19:38:25 +02001898 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001899 int err;
1900
Ido Schimmel9011b672017-05-16 19:38:25 +02001901 router = container_of(work, struct mlxsw_sp_router,
1902 neighs_update.dw.work);
1903 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001904 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001905		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
Yotam Gigib2157142016-07-05 11:27:51 +02001906
Ido Schimmel9011b672017-05-16 19:38:25 +02001907 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001908
Ido Schimmel9011b672017-05-16 19:38:25 +02001909 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001910}
1911
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001912static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1913{
1914 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001915 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001916
Ido Schimmel9011b672017-05-16 19:38:25 +02001917 router = container_of(work, struct mlxsw_sp_router,
1918 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001919	/* Iterate over the nexthop neighbours, find those that are unresolved
 1920	 * and send ARP on them. This solves the chicken-and-egg problem where
 1921	 * a nexthop would not get offloaded until its neighbour is resolved,
 1922	 * but the neighbour would never get resolved as long as traffic keeps
 1923	 * flowing in HW using a different nexthop.
 1924	 *
 1925	 * Take the RTNL mutex here to prevent the lists from changing.
1926 */
1927 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001928 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001929 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001930 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001931 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001932 rtnl_unlock();
1933
Ido Schimmel9011b672017-05-16 19:38:25 +02001934 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001935 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1936}
1937
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001938static void
1939mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1940 struct mlxsw_sp_neigh_entry *neigh_entry,
1941 bool removing);
1942
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001943static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001944{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001945 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1946 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1947}
1948
1949static void
1950mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1951 struct mlxsw_sp_neigh_entry *neigh_entry,
1952 enum mlxsw_reg_rauht_op op)
1953{
Jiri Pirko33b13412016-11-10 12:31:04 +01001954 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001955 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001956 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001957
1958 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1959 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001960 if (neigh_entry->counter_valid)
1961 mlxsw_reg_rauht_pack_counter(rauht_pl,
1962 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001963 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1964}
1965
1966static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001967mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1968 struct mlxsw_sp_neigh_entry *neigh_entry,
1969 enum mlxsw_reg_rauht_op op)
1970{
1971 struct neighbour *n = neigh_entry->key.n;
1972 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1973 const char *dip = n->primary_key;
1974
1975 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1976 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001977 if (neigh_entry->counter_valid)
1978 mlxsw_reg_rauht_pack_counter(rauht_pl,
1979 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001980 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1981}
1982
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001983bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001984{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001985 struct neighbour *n = neigh_entry->key.n;
1986
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001987 /* Packets with a link-local destination address are trapped
1988 * after LPM lookup and never reach the neighbour table, so
1989 * there is no need to program such neighbours to the device.
1990 */
1991 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1992 IPV6_ADDR_LINKLOCAL)
1993 return true;
1994 return false;
1995}
1996
1997static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001998mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1999 struct mlxsw_sp_neigh_entry *neigh_entry,
2000 bool adding)
2001{
2002 if (!adding && !neigh_entry->connected)
2003 return;
2004 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002005 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002006 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2007 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002008 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002009 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002010 return;
2011 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2012 mlxsw_sp_rauht_op(adding));
2013 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002014 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002015 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002016}
2017
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002018void
2019mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2020 struct mlxsw_sp_neigh_entry *neigh_entry,
2021 bool adding)
2022{
2023 if (adding)
2024 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2025 else
2026 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2027 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2028}
2029
Ido Schimmelceb88812017-11-02 17:14:07 +01002030struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002031 struct work_struct work;
2032 struct mlxsw_sp *mlxsw_sp;
2033 struct neighbour *n;
2034};
2035
2036static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2037{
Ido Schimmelceb88812017-11-02 17:14:07 +01002038 struct mlxsw_sp_netevent_work *net_work =
2039 container_of(work, struct mlxsw_sp_netevent_work, work);
2040 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002041 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002042 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002043 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002044 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002045 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002046
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002047 /* If these parameters are changed after we release the lock,
2048 * then we are guaranteed to receive another event letting us
2049 * know about it.
2050 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002051 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002052 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002053 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002054 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002055 read_unlock_bh(&n->lock);
2056
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002057 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002058 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002059 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2060 if (!entry_connected && !neigh_entry)
2061 goto out;
2062 if (!neigh_entry) {
2063 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2064 if (IS_ERR(neigh_entry))
2065 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002066 }
2067
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002068 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2069 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2070 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2071
2072 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2073 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2074
2075out:
2076 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002077 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002078 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002079}
2080
Ido Schimmel28678f02017-11-02 17:14:10 +01002081static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2082
2083static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2084{
2085 struct mlxsw_sp_netevent_work *net_work =
2086 container_of(work, struct mlxsw_sp_netevent_work, work);
2087 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2088
2089 mlxsw_sp_mp_hash_init(mlxsw_sp);
2090 kfree(net_work);
2091}
2092
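/* Netevent notifier. DELAY_PROBE_TIME updates retune the neighbour polling
 * interval, NEIGH_UPDATE events are deferred to a work item that programs the
 * neighbour into the device, and MULTIPATH_HASH_UPDATE events trigger a
 * recomputation of the ECMP hash configuration. The notifier runs in atomic
 * context, hence the deferral to work items wherever sleeping is required.
 */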
2093static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002094 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002095{
Ido Schimmelceb88812017-11-02 17:14:07 +01002096 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002097 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002098 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002099 struct mlxsw_sp *mlxsw_sp;
2100 unsigned long interval;
2101 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002102 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002103 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002104
2105 switch (event) {
2106 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2107 p = ptr;
2108
2109 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002110 if (!p->dev || (p->tbl->family != AF_INET &&
2111 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002112 return NOTIFY_DONE;
2113
2114 /* We are in atomic context and can't take RTNL mutex,
2115 * so use RCU variant to walk the device chain.
2116 */
2117 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2118 if (!mlxsw_sp_port)
2119 return NOTIFY_DONE;
2120
2121 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2122 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002123 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002124
2125 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2126 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002127 case NETEVENT_NEIGH_UPDATE:
2128 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002129
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002130 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002131 return NOTIFY_DONE;
2132
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002133 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002134 if (!mlxsw_sp_port)
2135 return NOTIFY_DONE;
2136
Ido Schimmelceb88812017-11-02 17:14:07 +01002137 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2138 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002139 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002140 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002141 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002142
Ido Schimmelceb88812017-11-02 17:14:07 +01002143 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2144 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2145 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002146
2147 /* Take a reference to ensure the neighbour won't be
 2148		 * destroyed until we drop the reference in the delayed
2149 * work.
2150 */
2151 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002152 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002153 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002154 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002155 case NETEVENT_MULTIPATH_HASH_UPDATE:
2156 net = ptr;
2157
2158 if (!net_eq(net, &init_net))
2159 return NOTIFY_DONE;
2160
2161 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2162 if (!net_work)
2163 return NOTIFY_BAD;
2164
2165 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2166 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2167 net_work->mlxsw_sp = router->mlxsw_sp;
2168 mlxsw_core_schedule_work(&net_work->work);
2169 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002170 }
2171
2172 return NOTIFY_DONE;
2173}
2174
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002175static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2176{
Yotam Gigic723c7352016-07-05 11:27:43 +02002177 int err;
2178
Ido Schimmel9011b672017-05-16 19:38:25 +02002179 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002180 &mlxsw_sp_neigh_ht_params);
2181 if (err)
2182 return err;
2183
2184 /* Initialize the polling interval according to the default
2185 * table.
2186 */
2187 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2188
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002189	/* Create the delayed works for neighbour activity update and for
 	 * probing unresolved nexthops.
 	 */
Ido Schimmel9011b672017-05-16 19:38:25 +02002190 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002191 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002192 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002193 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002194 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2195 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002196 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002197}
2198
2199static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2200{
Ido Schimmel9011b672017-05-16 19:38:25 +02002201 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2202 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2203 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002204}
2205
Ido Schimmel9665b742017-02-08 11:16:42 +01002206static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002207 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002208{
2209 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2210
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002211 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002212 rif_list_node) {
2213 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002214 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002215 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002216}
2217
Petr Machata35225e42017-09-02 23:49:22 +02002218enum mlxsw_sp_nexthop_type {
2219 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002220 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002221};
2222
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002223struct mlxsw_sp_nexthop_key {
2224 struct fib_nh *fib_nh;
2225};
2226
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002227struct mlxsw_sp_nexthop {
2228 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002229 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002230 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002231 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2232 * this belongs to
2233 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002234 struct rhash_head ht_node;
2235 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002236 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002237 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002238 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002239 int norm_nh_weight;
2240 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002241 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002242 u8 should_offload:1, /* set indicates this neigh is connected and
2243 * should be put to KVD linear area of this group.
2244 */
2245 offloaded:1, /* set in case the neigh is actually put into
2246 * KVD linear area of this group.
2247 */
2248 update:1; /* set indicates that MAC of this neigh should be
2249 * updated in HW
2250 */
Petr Machata35225e42017-09-02 23:49:22 +02002251 enum mlxsw_sp_nexthop_type type;
2252 union {
2253 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002254 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002255 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002256 unsigned int counter_index;
2257 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002258};
2259
2260struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002261 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002262 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002263 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002264 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002265 u8 adj_index_valid:1,
2266 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002267 u32 adj_index;
2268 u16 ecmp_size;
2269 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002270 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002271 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002272#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002273};
2274
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002275void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2276 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002277{
2278 struct devlink *devlink;
2279
2280 devlink = priv_to_devlink(mlxsw_sp->core);
2281 if (!devlink_dpipe_table_counter_enabled(devlink,
2282 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2283 return;
2284
2285 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2286 return;
2287
2288 nh->counter_valid = true;
2289}
2290
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002291void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2292 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002293{
2294 if (!nh->counter_valid)
2295 return;
2296 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2297 nh->counter_valid = false;
2298}
2299
2300int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2301 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2302{
2303 if (!nh->counter_valid)
2304 return -EINVAL;
2305
2306 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2307 p_counter, NULL);
2308}
2309
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002310struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2311 struct mlxsw_sp_nexthop *nh)
2312{
2313 if (!nh) {
2314 if (list_empty(&router->nexthop_list))
2315 return NULL;
2316 else
2317 return list_first_entry(&router->nexthop_list,
2318 typeof(*nh), router_list_node);
2319 }
2320 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2321 return NULL;
2322 return list_next_entry(nh, router_list_node);
2323}
2324
2325bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2326{
2327 return nh->offloaded;
2328}
2329
2330unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2331{
2332 if (!nh->offloaded)
2333 return NULL;
2334 return nh->neigh_entry->ha;
2335}
2336
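/* Report where a nexthop sits in the adjacency table: the group's base
 * adjacency index, the group (ECMP) size and the nexthop's offset within the
 * group. The offset is the sum of the adjacency entries occupied by the
 * offloaded nexthops that precede it in the group.
 */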
2337int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002338 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002339{
2340 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2341 u32 adj_hash_index = 0;
2342 int i;
2343
2344 if (!nh->offloaded || !nh_grp->adj_index_valid)
2345 return -EINVAL;
2346
2347 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002348 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002349
2350 for (i = 0; i < nh_grp->count; i++) {
2351 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2352
2353 if (nh_iter == nh)
2354 break;
2355 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002356 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002357 }
2358
2359 *p_adj_hash_index = adj_hash_index;
2360 return 0;
2361}
2362
2363struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2364{
2365 return nh->rif;
2366}
2367
2368bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2369{
2370 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2371 int i;
2372
2373 for (i = 0; i < nh_grp->count; i++) {
2374 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2375
2376 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2377 return true;
2378 }
2379 return false;
2380}
2381
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002382static struct fib_info *
2383mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2384{
2385 return nh_grp->priv;
2386}
2387
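/* Lookup key for the nexthop group hash table: IPv4 groups are keyed by
 * their fib_info, IPv6 groups by the FIB entry whose gateways and interfaces
 * the group's nexthops must match.
 */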
2388struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002389 enum mlxsw_sp_l3proto proto;
2390 union {
2391 struct fib_info *fi;
2392 struct mlxsw_sp_fib6_entry *fib6_entry;
2393 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002394};
2395
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002396static bool
2397mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2398 const struct in6_addr *gw, int ifindex)
2399{
2400 int i;
2401
2402 for (i = 0; i < nh_grp->count; i++) {
2403 const struct mlxsw_sp_nexthop *nh;
2404
2405 nh = &nh_grp->nexthops[i];
2406 if (nh->ifindex == ifindex &&
2407 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2408 return true;
2409 }
2410
2411 return false;
2412}
2413
2414static bool
2415mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2416 const struct mlxsw_sp_fib6_entry *fib6_entry)
2417{
2418 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2419
2420 if (nh_grp->count != fib6_entry->nrt6)
2421 return false;
2422
2423 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2424 struct in6_addr *gw;
2425 int ifindex;
2426
2427 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2428 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2429 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2430 return false;
2431 }
2432
2433 return true;
2434}
2435
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002436static int
2437mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2438{
2439 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2440 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2441
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002442 switch (cmp_arg->proto) {
2443 case MLXSW_SP_L3_PROTO_IPV4:
2444 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2445 case MLXSW_SP_L3_PROTO_IPV6:
2446 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2447 cmp_arg->fib6_entry);
2448 default:
2449 WARN_ON(1);
2450 return 1;
2451 }
2452}
2453
2454static int
2455mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2456{
2457 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002458}
2459
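/* The group hash table hashes both lookup keys (the cmp_arg above) and the
 * nexthop group objects themselves, so the two hash functions must agree:
 * IPv4 groups hash the fib_info pointer, while IPv6 groups hash the nexthop
 * count folded with the ifindex of every nexthop, matching what
 * mlxsw_sp_nexthop6_group_hash() computes from a FIB entry.
 */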
2460static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2461{
2462 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002463 const struct mlxsw_sp_nexthop *nh;
2464 struct fib_info *fi;
2465 unsigned int val;
2466 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002467
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002468 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2469 case AF_INET:
2470 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2471 return jhash(&fi, sizeof(fi), seed);
2472 case AF_INET6:
2473 val = nh_grp->count;
2474 for (i = 0; i < nh_grp->count; i++) {
2475 nh = &nh_grp->nexthops[i];
2476 val ^= nh->ifindex;
2477 }
2478 return jhash(&val, sizeof(val), seed);
2479 default:
2480 WARN_ON(1);
2481 return 0;
2482 }
2483}
2484
2485static u32
2486mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2487{
2488 unsigned int val = fib6_entry->nrt6;
2489 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2490 struct net_device *dev;
2491
2492 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2493 dev = mlxsw_sp_rt6->rt->dst.dev;
2494 val ^= dev->ifindex;
2495 }
2496
2497 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002498}
2499
2500static u32
2501mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2502{
2503 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2504
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002505 switch (cmp_arg->proto) {
2506 case MLXSW_SP_L3_PROTO_IPV4:
2507 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2508 case MLXSW_SP_L3_PROTO_IPV6:
2509 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2510 default:
2511 WARN_ON(1);
2512 return 0;
2513 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002514}
2515
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002516static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002517 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002518 .hashfn = mlxsw_sp_nexthop_group_hash,
2519 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2520 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002521};
2522
2523static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2524 struct mlxsw_sp_nexthop_group *nh_grp)
2525{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002526 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2527 !nh_grp->gateway)
2528 return 0;
2529
Ido Schimmel9011b672017-05-16 19:38:25 +02002530 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002531 &nh_grp->ht_node,
2532 mlxsw_sp_nexthop_group_ht_params);
2533}
2534
2535static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2536 struct mlxsw_sp_nexthop_group *nh_grp)
2537{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002538 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2539 !nh_grp->gateway)
2540 return;
2541
Ido Schimmel9011b672017-05-16 19:38:25 +02002542 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002543 &nh_grp->ht_node,
2544 mlxsw_sp_nexthop_group_ht_params);
2545}
2546
2547static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002548mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2549 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002550{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002551 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2552
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002553 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002554 cmp_arg.fi = fi;
2555 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2556 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002557 mlxsw_sp_nexthop_group_ht_params);
2558}
2559
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002560static struct mlxsw_sp_nexthop_group *
2561mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2562 struct mlxsw_sp_fib6_entry *fib6_entry)
2563{
2564 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2565
2566 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2567 cmp_arg.fib6_entry = fib6_entry;
2568 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2569 &cmp_arg,
2570 mlxsw_sp_nexthop_group_ht_params);
2571}
2572
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002573static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2574 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2575 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2576 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2577};
2578
2579static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2580 struct mlxsw_sp_nexthop *nh)
2581{
Ido Schimmel9011b672017-05-16 19:38:25 +02002582 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002583 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2584}
2585
2586static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2587 struct mlxsw_sp_nexthop *nh)
2588{
Ido Schimmel9011b672017-05-16 19:38:25 +02002589 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002590 mlxsw_sp_nexthop_ht_params);
2591}
2592
Ido Schimmelad178c82017-02-08 11:16:40 +01002593static struct mlxsw_sp_nexthop *
2594mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2595 struct mlxsw_sp_nexthop_key key)
2596{
Ido Schimmel9011b672017-05-16 19:38:25 +02002597 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002598 mlxsw_sp_nexthop_ht_params);
2599}
2600
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002601static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002602 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002603 u32 adj_index, u16 ecmp_size,
2604 u32 new_adj_index,
2605 u16 new_ecmp_size)
2606{
2607 char raleu_pl[MLXSW_REG_RALEU_LEN];
2608
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002609 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002610 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2611 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002612 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002613 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2614}
2615
2616static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2617 struct mlxsw_sp_nexthop_group *nh_grp,
2618 u32 old_adj_index, u16 old_ecmp_size)
2619{
2620 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002621 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002622 int err;
2623
2624 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002625 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002626 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002627 fib = fib_entry->fib_node->fib;
2628 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002629 old_adj_index,
2630 old_ecmp_size,
2631 nh_grp->adj_index,
2632 nh_grp->ecmp_size);
2633 if (err)
2634 return err;
2635 }
2636 return 0;
2637}
2638
Ido Schimmeleb789982017-10-22 23:11:48 +02002639static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2640 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002641{
2642 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2643 char ratr_pl[MLXSW_REG_RATR_LEN];
2644
2645 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002646 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2647 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002648 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002649 if (nh->counter_valid)
2650 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2651 else
2652 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2653
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002654 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2655}
2656
Ido Schimmeleb789982017-10-22 23:11:48 +02002657int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2658 struct mlxsw_sp_nexthop *nh)
2659{
2660 int i;
2661
2662 for (i = 0; i < nh->num_adj_entries; i++) {
2663 int err;
2664
2665 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2666 if (err)
2667 return err;
2668 }
2669
2670 return 0;
2671}
2672
2673static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2674 u32 adj_index,
2675 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002676{
2677 const struct mlxsw_sp_ipip_ops *ipip_ops;
2678
2679 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2680 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2681}
2682
Ido Schimmeleb789982017-10-22 23:11:48 +02002683static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2684 u32 adj_index,
2685 struct mlxsw_sp_nexthop *nh)
2686{
2687 int i;
2688
2689 for (i = 0; i < nh->num_adj_entries; i++) {
2690 int err;
2691
2692 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2693 nh);
2694 if (err)
2695 return err;
2696 }
2697
2698 return 0;
2699}
2700
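/* Write out the adjacency entries of every nexthop that should be offloaded,
 * starting at the group's base adjacency index. Nexthops that should not be
 * offloaded are simply marked as such. When 'reallocate' is set, all entries
 * are rewritten, e.g. after the group moved to a new adjacency index.
 */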
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002701static int
Petr Machata35225e42017-09-02 23:49:22 +02002702mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2703 struct mlxsw_sp_nexthop_group *nh_grp,
2704 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002705{
2706 u32 adj_index = nh_grp->adj_index; /* base */
2707 struct mlxsw_sp_nexthop *nh;
2708 int i;
2709 int err;
2710
2711 for (i = 0; i < nh_grp->count; i++) {
2712 nh = &nh_grp->nexthops[i];
2713
2714 if (!nh->should_offload) {
2715 nh->offloaded = 0;
2716 continue;
2717 }
2718
Ido Schimmela59b7e02017-01-23 11:11:42 +01002719 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002720 switch (nh->type) {
2721 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002722 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002723 (mlxsw_sp, adj_index, nh);
2724 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002725 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2726 err = mlxsw_sp_nexthop_ipip_update
2727 (mlxsw_sp, adj_index, nh);
2728 break;
Petr Machata35225e42017-09-02 23:49:22 +02002729 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002730 if (err)
2731 return err;
2732 nh->update = 0;
2733 nh->offloaded = 1;
2734 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002735 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002736 }
2737 return 0;
2738}
2739
Ido Schimmel1819ae32017-07-21 18:04:28 +02002740static bool
2741mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2742 const struct mlxsw_sp_fib_entry *fib_entry);
2743
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002744static int
2745mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2746 struct mlxsw_sp_nexthop_group *nh_grp)
2747{
2748 struct mlxsw_sp_fib_entry *fib_entry;
2749 int err;
2750
2751 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002752 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2753 fib_entry))
2754 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002755 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2756 if (err)
2757 return err;
2758 }
2759 return 0;
2760}
2761
2762static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002763mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2764 enum mlxsw_reg_ralue_op op, int err);
2765
2766static void
2767mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2768{
2769 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2770 struct mlxsw_sp_fib_entry *fib_entry;
2771
2772 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2773 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2774 fib_entry))
2775 continue;
2776 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2777 }
2778}
2779
Ido Schimmel425a08c2017-10-22 23:11:47 +02002780static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2781{
2782 /* Valid sizes for an adjacency group are:
2783 * 1-64, 512, 1024, 2048 and 4096.
2784 */
2785 if (*p_adj_grp_size <= 64)
2786 return;
2787 else if (*p_adj_grp_size <= 512)
2788 *p_adj_grp_size = 512;
2789 else if (*p_adj_grp_size <= 1024)
2790 *p_adj_grp_size = 1024;
2791 else if (*p_adj_grp_size <= 2048)
2792 *p_adj_grp_size = 2048;
2793 else
2794 *p_adj_grp_size = 4096;
2795}
2796
2797static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2798 unsigned int alloc_size)
2799{
2800 if (alloc_size >= 4096)
2801 *p_adj_grp_size = 4096;
2802 else if (alloc_size >= 2048)
2803 *p_adj_grp_size = 2048;
2804 else if (alloc_size >= 1024)
2805 *p_adj_grp_size = 1024;
2806 else if (alloc_size >= 512)
2807 *p_adj_grp_size = 512;
2808}
2809
2810static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2811 u16 *p_adj_grp_size)
2812{
2813 unsigned int alloc_size;
2814 int err;
2815
2816 /* Round up the requested group size to the next size supported
2817 * by the device and make sure the request can be satisfied.
2818 */
2819 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2820 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2821 &alloc_size);
2822 if (err)
2823 return err;
2824 /* It is possible the allocation results in more allocated
 2825	 * entries than requested. Try to use as many of them as
2826 * possible.
2827 */
2828 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2829
2830 return 0;
2831}
2832
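/* Normalize the nexthop weights: divide each weight by the GCD of the weights
 * of all nexthops that should be offloaded and record the sum of the results.
 * For example, configured weights of 3 and 6 normalize to 1 and 2, giving a
 * sum_norm_weight of 3.
 */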
Ido Schimmel77d964e2017-08-02 09:56:05 +02002833static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002834mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2835{
2836 int i, g = 0, sum_norm_weight = 0;
2837 struct mlxsw_sp_nexthop *nh;
2838
2839 for (i = 0; i < nh_grp->count; i++) {
2840 nh = &nh_grp->nexthops[i];
2841
2842 if (!nh->should_offload)
2843 continue;
2844 if (g > 0)
2845 g = gcd(nh->nh_weight, g);
2846 else
2847 g = nh->nh_weight;
2848 }
2849
2850 for (i = 0; i < nh_grp->count; i++) {
2851 nh = &nh_grp->nexthops[i];
2852
2853 if (!nh->should_offload)
2854 continue;
2855 nh->norm_nh_weight = nh->nh_weight / g;
2856 sum_norm_weight += nh->norm_nh_weight;
2857 }
2858
2859 nh_grp->sum_norm_weight = sum_norm_weight;
2860}
2861
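/* Split the group's adjacency entries among the offloaded nexthops in
 * proportion to their normalized weights, so that the per-nexthop entry
 * counts add up to the ECMP size. For example, normalized weights of 1 and 2
 * with an ECMP size of 512 yield 171 and 341 adjacency entries respectively.
 */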
2862static void
2863mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2864{
2865 int total = nh_grp->sum_norm_weight;
2866 u16 ecmp_size = nh_grp->ecmp_size;
2867 int i, weight = 0, lower_bound = 0;
2868
2869 for (i = 0; i < nh_grp->count; i++) {
2870 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2871 int upper_bound;
2872
2873 if (!nh->should_offload)
2874 continue;
2875 weight += nh->norm_nh_weight;
2876 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2877 nh->num_adj_entries = upper_bound - lower_bound;
2878 lower_bound = upper_bound;
2879 }
2880}
2881
2882static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002883mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2884 struct mlxsw_sp_nexthop_group *nh_grp)
2885{
Ido Schimmeleb789982017-10-22 23:11:48 +02002886 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002887 struct mlxsw_sp_nexthop *nh;
2888 bool offload_change = false;
2889 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002890 bool old_adj_index_valid;
2891 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002892 int i;
2893 int err;
2894
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002895 if (!nh_grp->gateway) {
2896 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2897 return;
2898 }
2899
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002900 for (i = 0; i < nh_grp->count; i++) {
2901 nh = &nh_grp->nexthops[i];
2902
Petr Machata56b8a9e2017-07-31 09:27:29 +02002903 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002904 offload_change = true;
2905 if (nh->should_offload)
2906 nh->update = 1;
2907 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002908 }
2909 if (!offload_change) {
2910 /* Nothing was added or removed, so no need to reallocate. Just
2911 * update MAC on existing adjacency indexes.
2912 */
Petr Machata35225e42017-09-02 23:49:22 +02002913 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002914 if (err) {
2915 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2916 goto set_trap;
2917 }
2918 return;
2919 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002920 mlxsw_sp_nexthop_group_normalize(nh_grp);
2921 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002922 /* No neigh of this group is connected so we just set
2923 * the trap and let everything flow through the kernel.
2924 */
2925 goto set_trap;
2926
Ido Schimmeleb789982017-10-22 23:11:48 +02002927 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002928 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2929 if (err)
2930 /* No valid allocation size available. */
2931 goto set_trap;
2932
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002933 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2934 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002935 /* We ran out of KVD linear space, just set the
2936 * trap and let everything flow through the kernel.
2937 */
2938 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2939 goto set_trap;
2940 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002941 old_adj_index_valid = nh_grp->adj_index_valid;
2942 old_adj_index = nh_grp->adj_index;
2943 old_ecmp_size = nh_grp->ecmp_size;
2944 nh_grp->adj_index_valid = 1;
2945 nh_grp->adj_index = adj_index;
2946 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002947 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002948 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002949 if (err) {
2950 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2951 goto set_trap;
2952 }
2953
2954 if (!old_adj_index_valid) {
2955 /* The trap was set for fib entries, so we have to call
2956 * fib entry update to unset it and use adjacency index.
2957 */
2958 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2959 if (err) {
2960 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2961 goto set_trap;
2962 }
2963 return;
2964 }
2965
2966 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2967 old_adj_index, old_ecmp_size);
2968 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2969 if (err) {
2970 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2971 goto set_trap;
2972 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002973
2974 /* Offload state within the group changed, so update the flags. */
2975 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2976
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002977 return;
2978
2979set_trap:
2980 old_adj_index_valid = nh_grp->adj_index_valid;
2981 nh_grp->adj_index_valid = 0;
2982 for (i = 0; i < nh_grp->count; i++) {
2983 nh = &nh_grp->nexthops[i];
2984 nh->offloaded = 0;
2985 }
2986 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2987 if (err)
2988 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2989 if (old_adj_index_valid)
2990 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2991}
2992
2993static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2994 bool removing)
2995{
Petr Machata213666a2017-07-31 09:27:30 +02002996 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002997 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002998 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002999 nh->should_offload = 0;
3000 nh->update = 1;
3001}
3002
3003static void
3004mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3005 struct mlxsw_sp_neigh_entry *neigh_entry,
3006 bool removing)
3007{
3008 struct mlxsw_sp_nexthop *nh;
3009
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003010 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3011 neigh_list_node) {
3012 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3013 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3014 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003015}
3016
Ido Schimmel9665b742017-02-08 11:16:42 +01003017static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003018 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003019{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003020 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003021 return;
3022
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003023 nh->rif = rif;
3024 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003025}
3026
3027static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3028{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003029 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003030 return;
3031
3032 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003033 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003034}
3035
Ido Schimmela8c97012017-02-08 11:16:35 +01003036static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3037 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003038{
3039 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003040 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003041 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003042 int err;
3043
Ido Schimmelad178c82017-02-08 11:16:40 +01003044 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003045 return 0;
3046
Jiri Pirko33b13412016-11-10 12:31:04 +01003047 /* Take a reference on the neigh here to ensure that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003048 * not destroyed before the nexthop entry is finished with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003049 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003050 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003051 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003052 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003053 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003054 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3055 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003056 if (IS_ERR(n))
3057 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003058 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003059 }
3060 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3061 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003062 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3063 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003064 err = -EINVAL;
3065 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003066 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003067 }
Yotam Gigib2157142016-07-05 11:27:51 +02003068
3069 /* If that is the first nexthop connected to that neigh, add to
3070 * nexthop_neighs_list
3071 */
3072 if (list_empty(&neigh_entry->nexthop_list))
3073 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003074 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003075
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003076 nh->neigh_entry = neigh_entry;
3077 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3078 read_lock_bh(&n->lock);
3079 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003080 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003081 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003082 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003083
3084 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003085
3086err_neigh_entry_create:
3087 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003088 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003089}
3090
Ido Schimmela8c97012017-02-08 11:16:35 +01003091static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3092 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003093{
3094 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003095 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003096
Ido Schimmelb8399a12017-02-08 11:16:33 +01003097 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003098 return;
3099 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003100
Ido Schimmel58312122016-12-23 09:32:50 +01003101 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003102 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003103 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003104
3105 /* If that is the last nexthop connected to that neigh, remove from
3106 * nexthop_neighs_list
3107 */
Ido Schimmele58be792017-02-08 11:16:28 +01003108 if (list_empty(&neigh_entry->nexthop_list))
3109 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003110
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003111 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3112 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3113
3114 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003115}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003116
Petr Machata1012b9a2017-09-02 23:49:23 +02003117static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003118 struct mlxsw_sp_nexthop *nh,
3119 struct net_device *ol_dev)
3120{
3121 if (!nh->nh_grp->gateway || nh->ipip_entry)
3122 return 0;
3123
Petr Machata4cccb732017-10-16 16:26:39 +02003124 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3125 if (!nh->ipip_entry)
3126 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003127
3128 __mlxsw_sp_nexthop_neigh_update(nh, false);
3129 return 0;
3130}
3131
3132static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3133 struct mlxsw_sp_nexthop *nh)
3134{
3135 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3136
3137 if (!ipip_entry)
3138 return;
3139
3140 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003141 nh->ipip_entry = NULL;
3142}
3143
3144static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3145 const struct fib_nh *fib_nh,
3146 enum mlxsw_sp_ipip_type *p_ipipt)
3147{
3148 struct net_device *dev = fib_nh->nh_dev;
3149
3150 return dev &&
3151 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3152 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3153}
3154
Petr Machata35225e42017-09-02 23:49:22 +02003155static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3156 struct mlxsw_sp_nexthop *nh)
3157{
3158 switch (nh->type) {
3159 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3160 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3161 mlxsw_sp_nexthop_rif_fini(nh);
3162 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003163 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003164 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003165 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3166 break;
Petr Machata35225e42017-09-02 23:49:22 +02003167 }
3168}
3169
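/* Editor's note: the helper below resolves the type of an IPv4 nexthop.
 * If the nexthop device is an offloadable IP-in-IP tunnel, the nexthop
 * is bound to the corresponding IPIP entry; otherwise it is treated as a
 * regular Ethernet nexthop and its router interface and neighbour are
 * resolved.
 */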
3170static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3171 struct mlxsw_sp_nexthop *nh,
3172 struct fib_nh *fib_nh)
3173{
Petr Machata1012b9a2017-09-02 23:49:23 +02003174 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003175 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003176 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003177 struct mlxsw_sp_rif *rif;
3178 int err;
3179
Petr Machata1012b9a2017-09-02 23:49:23 +02003180 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3181 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3182 MLXSW_SP_L3_PROTO_IPV4)) {
3183 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003184 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003185 if (err)
3186 return err;
3187 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3188 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003189 }
3190
Petr Machata35225e42017-09-02 23:49:22 +02003191 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3192 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3193 if (!rif)
3194 return 0;
3195
3196 mlxsw_sp_nexthop_rif_init(nh, rif);
3197 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3198 if (err)
3199 goto err_neigh_init;
3200
3201 return 0;
3202
3203err_neigh_init:
3204 mlxsw_sp_nexthop_rif_fini(nh);
3205 return err;
3206}
3207
3208static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3209 struct mlxsw_sp_nexthop *nh)
3210{
3211 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3212}
3213
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003214static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3215 struct mlxsw_sp_nexthop_group *nh_grp,
3216 struct mlxsw_sp_nexthop *nh,
3217 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003218{
3219 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003220 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003221 int err;
3222
3223 nh->nh_grp = nh_grp;
3224 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003225#ifdef CONFIG_IP_ROUTE_MULTIPATH
3226 nh->nh_weight = fib_nh->nh_weight;
3227#else
3228 nh->nh_weight = 1;
3229#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003230 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003231 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3232 if (err)
3233 return err;
3234
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003235 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003236 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3237
Ido Schimmel97989ee2017-03-10 08:53:38 +01003238 if (!dev)
3239 return 0;
3240
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003241 in_dev = __in_dev_get_rtnl(dev);
3242 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3243 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3244 return 0;
3245
Petr Machata35225e42017-09-02 23:49:22 +02003246 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003247 if (err)
3248 goto err_nexthop_neigh_init;
3249
3250 return 0;
3251
3252err_nexthop_neigh_init:
3253 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3254 return err;
3255}
3256
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003257static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3258 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003259{
Petr Machata35225e42017-09-02 23:49:22 +02003260 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003261 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003262 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003263 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003264}
3265
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003266static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3267 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003268{
3269 struct mlxsw_sp_nexthop_key key;
3270 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003271
Ido Schimmel9011b672017-05-16 19:38:25 +02003272 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003273 return;
3274
3275 key.fib_nh = fib_nh;
3276 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3277 if (WARN_ON_ONCE(!nh))
3278 return;
3279
Ido Schimmelad178c82017-02-08 11:16:40 +01003280 switch (event) {
3281 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003282 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003283 break;
3284 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003285 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003286 break;
3287 }
3288
3289 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3290}
3291
Ido Schimmel9665b742017-02-08 11:16:42 +01003292static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003293 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003294{
3295 struct mlxsw_sp_nexthop *nh, *tmp;
3296
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003297 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003298 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003299 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3300 }
3301}
3302
Petr Machata9b014512017-09-02 23:49:20 +02003303static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3304 const struct fib_info *fi)
3305{
Petr Machata1012b9a2017-09-02 23:49:23 +02003306 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3307 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003308}
3309
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003310static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003311mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003312{
3313 struct mlxsw_sp_nexthop_group *nh_grp;
3314 struct mlxsw_sp_nexthop *nh;
3315 struct fib_nh *fib_nh;
3316 size_t alloc_size;
3317 int i;
3318 int err;
3319
3320 alloc_size = sizeof(*nh_grp) +
3321 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3322 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3323 if (!nh_grp)
3324 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003325 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003326 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003327 nh_grp->neigh_tbl = &arp_tbl;
3328
Petr Machata9b014512017-09-02 23:49:20 +02003329 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003330 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003331 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003332 for (i = 0; i < nh_grp->count; i++) {
3333 nh = &nh_grp->nexthops[i];
3334 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003335 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003336 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003337 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003338 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003339 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3340 if (err)
3341 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003342 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3343 return nh_grp;
3344
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003345err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003346err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003347 for (i--; i >= 0; i--) {
3348 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003349 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003350 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003351 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003352 kfree(nh_grp);
3353 return ERR_PTR(err);
3354}
3355
3356static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003357mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3358 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359{
3360 struct mlxsw_sp_nexthop *nh;
3361 int i;
3362
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003363 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003364 for (i = 0; i < nh_grp->count; i++) {
3365 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003366 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003367 }
Ido Schimmel58312122016-12-23 09:32:50 +01003368 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3369 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003370 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003371 kfree(nh_grp);
3372}
3373
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003374static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3375 struct mlxsw_sp_fib_entry *fib_entry,
3376 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003377{
3378 struct mlxsw_sp_nexthop_group *nh_grp;
3379
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003380 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003381 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003382 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003383 if (IS_ERR(nh_grp))
3384 return PTR_ERR(nh_grp);
3385 }
3386 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3387 fib_entry->nh_group = nh_grp;
3388 return 0;
3389}
3390
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003391static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3392 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003393{
3394 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3395
3396 list_del(&fib_entry->nexthop_group_node);
3397 if (!list_empty(&nh_grp->fib_list))
3398 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003399 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003400}
3401
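/* Editor's note: only IPv4 routes with a TOS of zero are considered for
 * offload; unicast entries with a non-zero TOS end up with a trap action
 * so that the kernel keeps forwarding the matching packets.
 */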
Ido Schimmel013b20f2017-02-08 11:16:36 +01003402static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003403mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3404{
3405 struct mlxsw_sp_fib4_entry *fib4_entry;
3406
3407 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3408 common);
3409 return !fib4_entry->tos;
3410}
3411
3412static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003413mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3414{
3415 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3416
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003417 switch (fib_entry->fib_node->fib->proto) {
3418 case MLXSW_SP_L3_PROTO_IPV4:
3419 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3420 return false;
3421 break;
3422 case MLXSW_SP_L3_PROTO_IPV6:
3423 break;
3424 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003425
Ido Schimmel013b20f2017-02-08 11:16:36 +01003426 switch (fib_entry->type) {
3427 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3428 return !!nh_group->adj_index_valid;
3429 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003430 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003431 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3432 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003433 default:
3434 return false;
3435 }
3436}
3437
Ido Schimmel428b8512017-08-03 13:28:28 +02003438static struct mlxsw_sp_nexthop *
3439mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3440 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3441{
3442 int i;
3443
3444 for (i = 0; i < nh_grp->count; i++) {
3445 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3446 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3447
3448 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3449 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3450 &rt->rt6i_gateway))
3451 return nh;
3452 continue;
3453 }
3454
3455 return NULL;
3456}
3457
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003458static void
3459mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3460{
3461 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3462 int i;
3463
Petr Machata4607f6d2017-09-02 23:49:25 +02003464 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3465 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003466 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3467 return;
3468 }
3469
3470 for (i = 0; i < nh_grp->count; i++) {
3471 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3472
3473 if (nh->offloaded)
3474 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3475 else
3476 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3477 }
3478}
3479
3480static void
3481mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3482{
3483 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3484 int i;
3485
3486 for (i = 0; i < nh_grp->count; i++) {
3487 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3488
3489 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3490 }
3491}
3492
Ido Schimmel428b8512017-08-03 13:28:28 +02003493static void
3494mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3495{
3496 struct mlxsw_sp_fib6_entry *fib6_entry;
3497 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3498
3499 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3500 common);
3501
3502 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3503 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003504 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003505 return;
3506 }
3507
3508 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3509 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3510 struct mlxsw_sp_nexthop *nh;
3511
3512 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3513 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003514 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003515 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003516 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003517 }
3518}
3519
3520static void
3521mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3522{
3523 struct mlxsw_sp_fib6_entry *fib6_entry;
3524 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3525
3526 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3527 common);
3528 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3529 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3530
Ido Schimmelfe400792017-08-15 09:09:49 +02003531 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003532 }
3533}
3534
Ido Schimmel013b20f2017-02-08 11:16:36 +01003535static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3536{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003537 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003538 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003539 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003540 break;
3541 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003542 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3543 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003544 }
3545}
3546
3547static void
3548mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3549{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003550 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003551 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003552 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003553 break;
3554 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003555 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3556 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003557 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003558}
3559
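/* Editor's note: the helper below keeps the kernel's RTNH_F_OFFLOAD
 * flags in sync with the device. They are cleared when an entry is
 * deleted and, after a successful write, set or cleared depending on
 * whether the entry could actually be offloaded.
 */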
3560static void
3561mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3562 enum mlxsw_reg_ralue_op op, int err)
3563{
3564 switch (op) {
3565 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003566 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3567 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3568 if (err)
3569 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003570 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003571 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003572 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003573 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3574 return;
3575 default:
3576 return;
3577 }
3578}
3579
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003580static void
3581mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3582 const struct mlxsw_sp_fib_entry *fib_entry,
3583 enum mlxsw_reg_ralue_op op)
3584{
3585 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3586 enum mlxsw_reg_ralxx_protocol proto;
3587 u32 *p_dip;
3588
3589 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3590
3591 switch (fib->proto) {
3592 case MLXSW_SP_L3_PROTO_IPV4:
3593 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3594 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3595 fib_entry->fib_node->key.prefix_len,
3596 *p_dip);
3597 break;
3598 case MLXSW_SP_L3_PROTO_IPV6:
3599 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3600 fib_entry->fib_node->key.prefix_len,
3601 fib_entry->fib_node->key.addr);
3602 break;
3603 }
3604}
3605
3606static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3607 struct mlxsw_sp_fib_entry *fib_entry,
3608 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003609{
3610 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003611 enum mlxsw_reg_ralue_trap_action trap_action;
3612 u16 trap_id = 0;
3613 u32 adjacency_index = 0;
3614 u16 ecmp_size = 0;
3615
3616 /* In case the nexthop group adjacency index is valid, use it
3617 * with provided ECMP size. Otherwise, setup trap and pass
3618 * traffic to kernel.
3619 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003620 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003621 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3622 adjacency_index = fib_entry->nh_group->adj_index;
3623 ecmp_size = fib_entry->nh_group->ecmp_size;
3624 } else {
3625 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3626 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3627 }
3628
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003629 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003630 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3631 adjacency_index, ecmp_size);
3632 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3633}
3634
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003635static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3636 struct mlxsw_sp_fib_entry *fib_entry,
3637 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003638{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003639 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003640 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003641 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003642 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003643 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003644
3645 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3646 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003647 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003648 } else {
3649 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3650 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3651 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003652
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003653 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003654 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3655 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003656 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3657}
3658
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003659static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3660 struct mlxsw_sp_fib_entry *fib_entry,
3661 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003662{
3663 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003664
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003665 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003666 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3667 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3668}
3669
Petr Machata4607f6d2017-09-02 23:49:25 +02003670static int
3671mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3672 struct mlxsw_sp_fib_entry *fib_entry,
3673 enum mlxsw_reg_ralue_op op)
3674{
3675 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3676 const struct mlxsw_sp_ipip_ops *ipip_ops;
3677
3678 if (WARN_ON(!ipip_entry))
3679 return -EINVAL;
3680
3681 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3682 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3683 fib_entry->decap.tunnel_index);
3684}
3685
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003686static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3687 struct mlxsw_sp_fib_entry *fib_entry,
3688 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003689{
3690 switch (fib_entry->type) {
3691 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003692 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003693 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003694 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003695 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003696 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003697 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3698 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3699 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003700 }
3701 return -EINVAL;
3702}
3703
3704static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3705 struct mlxsw_sp_fib_entry *fib_entry,
3706 enum mlxsw_reg_ralue_op op)
3707{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003708 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003709
Ido Schimmel013b20f2017-02-08 11:16:36 +01003710 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003711
Ido Schimmel013b20f2017-02-08 11:16:36 +01003712 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003713}
3714
3715static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3716 struct mlxsw_sp_fib_entry *fib_entry)
3717{
Jiri Pirko7146da32016-09-01 10:37:41 +02003718 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3719 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003720}
3721
3722static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3723 struct mlxsw_sp_fib_entry *fib_entry)
3724{
3725 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3726 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3727}
3728
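/* Editor's note: the helper below maps a kernel route type to a device
 * FIB entry type. Local routes that terminate an offloaded IP-in-IP
 * tunnel become decap entries, other local and broadcast routes are
 * trapped to the CPU, blackhole-like routes use the lower priority local
 * action, and unicast routes become remote or local entries depending on
 * whether they have a gateway.
 */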
Jiri Pirko61c503f2016-07-04 08:23:11 +02003729static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003730mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3731 const struct fib_entry_notifier_info *fen_info,
3732 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003733{
Petr Machata4607f6d2017-09-02 23:49:25 +02003734 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3735 struct net_device *dev = fen_info->fi->fib_dev;
3736 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003737 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003738
Ido Schimmel97989ee2017-03-10 08:53:38 +01003739 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003740 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003741 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3742 MLXSW_SP_L3_PROTO_IPV4, dip);
3743 if (ipip_entry) {
3744 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3745 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3746 fib_entry,
3747 ipip_entry);
3748 }
3749 /* fall through */
3750 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003751 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3752 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003753 case RTN_UNREACHABLE: /* fall through */
3754 case RTN_BLACKHOLE: /* fall through */
3755 case RTN_PROHIBIT:
3756 /* Packets hitting these routes need to be trapped, but
3757 * can do so with a lower priority than packets directed
3758 * at the host, so use action type local instead of trap.
3759 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003760 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003761 return 0;
3762 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003763 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003764 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003765 else
3766 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003767 return 0;
3768 default:
3769 return -EINVAL;
3770 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003771}
3772
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003773static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003774mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3775 struct mlxsw_sp_fib_node *fib_node,
3776 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003777{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003778 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003779 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003780 int err;
3781
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003782 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3783 if (!fib4_entry)
3784 return ERR_PTR(-ENOMEM);
3785 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003786
3787 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3788 if (err)
3789 goto err_fib4_entry_type_set;
3790
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003791 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003792 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003793 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003794
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003795 fib4_entry->prio = fen_info->fi->fib_priority;
3796 fib4_entry->tb_id = fen_info->tb_id;
3797 fib4_entry->type = fen_info->type;
3798 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003799
3800 fib_entry->fib_node = fib_node;
3801
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003802 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003803
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003804err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003805err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003806 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003807 return ERR_PTR(err);
3808}
3809
3810static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003811 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003812{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003813 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003814 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003815}
3816
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003817static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003818mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3819 const struct fib_entry_notifier_info *fen_info)
3820{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003821 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003822 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003823 struct mlxsw_sp_fib *fib;
3824 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003825
Ido Schimmel160e22a2017-07-18 10:10:20 +02003826 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3827 if (!vr)
3828 return NULL;
3829 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3830
3831 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3832 sizeof(fen_info->dst),
3833 fen_info->dst_len);
3834 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003835 return NULL;
3836
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003837 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3838 if (fib4_entry->tb_id == fen_info->tb_id &&
3839 fib4_entry->tos == fen_info->tos &&
3840 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003841 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3842 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003843 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003844 }
3845 }
3846
3847 return NULL;
3848}
3849
3850static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3851 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3852 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3853 .key_len = sizeof(struct mlxsw_sp_fib_key),
3854 .automatic_shrinking = true,
3855};
3856
3857static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3858 struct mlxsw_sp_fib_node *fib_node)
3859{
3860 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3861 mlxsw_sp_fib_ht_params);
3862}
3863
3864static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3865 struct mlxsw_sp_fib_node *fib_node)
3866{
3867 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3868 mlxsw_sp_fib_ht_params);
3869}
3870
3871static struct mlxsw_sp_fib_node *
3872mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3873 size_t addr_len, unsigned char prefix_len)
3874{
3875 struct mlxsw_sp_fib_key key;
3876
3877 memset(&key, 0, sizeof(key));
3878 memcpy(key.addr, addr, addr_len);
3879 key.prefix_len = prefix_len;
3880 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3881}
3882
3883static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003884mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003885 size_t addr_len, unsigned char prefix_len)
3886{
3887 struct mlxsw_sp_fib_node *fib_node;
3888
3889 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3890 if (!fib_node)
3891 return NULL;
3892
3893 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003894 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003895 memcpy(fib_node->key.addr, addr, addr_len);
3896 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003897
3898 return fib_node;
3899}
3900
3901static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3902{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003903 list_del(&fib_node->list);
3904 WARN_ON(!list_empty(&fib_node->entry_list));
3905 kfree(fib_node);
3906}
3907
3908static bool
3909mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3910 const struct mlxsw_sp_fib_entry *fib_entry)
3911{
3912 return list_first_entry(&fib_node->entry_list,
3913 struct mlxsw_sp_fib_entry, list) == fib_entry;
3914}
3915
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003916static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3917 struct mlxsw_sp_fib *fib,
3918 struct mlxsw_sp_fib_node *fib_node)
3919{
3920 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3921 struct mlxsw_sp_lpm_tree *lpm_tree;
3922 int err;
3923
3924 /* Since the tree is shared between all virtual routers we must
3925 * make sure it contains all the required prefix lengths. This
3926 * can be computed by either adding the new prefix length to the
3927 * existing prefix usage of a bound tree, or by aggregating the
3928 * prefix lengths across all virtual routers and adding the new
3929 * one as well.
3930 */
3931 if (fib->lpm_tree)
3932 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3933 &fib->lpm_tree->prefix_usage);
3934 else
3935 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3936 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3937
3938 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3939 fib->proto);
3940 if (IS_ERR(lpm_tree))
3941 return PTR_ERR(lpm_tree);
3942
3943 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3944 return 0;
3945
3946 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3947 if (err)
3948 return err;
3949
3950 return 0;
3951}
3952
3953static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3954 struct mlxsw_sp_fib *fib)
3955{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003956 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3957 return;
3958 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3959 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3960 fib->lpm_tree = NULL;
3961}
3962
Ido Schimmel9aecce12017-02-09 10:28:42 +01003963static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3964{
3965 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003966 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003967
3968 if (fib->prefix_ref_count[prefix_len]++ == 0)
3969 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3970}
3971
3972static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3973{
3974 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003975 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003976
3977 if (--fib->prefix_ref_count[prefix_len] == 0)
3978 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3979}
3980
Ido Schimmel76610eb2017-03-10 08:53:41 +01003981static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3982 struct mlxsw_sp_fib_node *fib_node,
3983 struct mlxsw_sp_fib *fib)
3984{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003985 int err;
3986
3987 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3988 if (err)
3989 return err;
3990 fib_node->fib = fib;
3991
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003992 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3993 if (err)
3994 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003995
3996 mlxsw_sp_fib_node_prefix_inc(fib_node);
3997
3998 return 0;
3999
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004000err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004001 fib_node->fib = NULL;
4002 mlxsw_sp_fib_node_remove(fib, fib_node);
4003 return err;
4004}
4005
4006static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4007 struct mlxsw_sp_fib_node *fib_node)
4008{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004009 struct mlxsw_sp_fib *fib = fib_node->fib;
4010
4011 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004012 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004013 fib_node->fib = NULL;
4014 mlxsw_sp_fib_node_remove(fib, fib_node);
4015}
4016
Ido Schimmel9aecce12017-02-09 10:28:42 +01004017static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004018mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4019 size_t addr_len, unsigned char prefix_len,
4020 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004021{
4022 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004023 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004024 struct mlxsw_sp_vr *vr;
4025 int err;
4026
David Ahernf8fa9b42017-10-18 09:56:56 -07004027 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004028 if (IS_ERR(vr))
4029 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004030 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004031
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004032 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004033 if (fib_node)
4034 return fib_node;
4035
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004036 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004037 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004038 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004039 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004040 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004041
Ido Schimmel76610eb2017-03-10 08:53:41 +01004042 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4043 if (err)
4044 goto err_fib_node_init;
4045
Ido Schimmel9aecce12017-02-09 10:28:42 +01004046 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004047
Ido Schimmel76610eb2017-03-10 08:53:41 +01004048err_fib_node_init:
4049 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004050err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004051 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004052 return ERR_PTR(err);
4053}
4054
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004055static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4056 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004057{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004058 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004059
Ido Schimmel9aecce12017-02-09 10:28:42 +01004060 if (!list_empty(&fib_node->entry_list))
4061 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004062 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004063 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004064 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004065}
4066
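/* Editor's note: entries of a FIB node are kept in a list ordered by
 * table ID (descending, so the local table precedes the main table),
 * then by TOS (descending) and then by priority (ascending, i.e. lowest
 * metric first). Only the first entry in the list is programmed to the
 * device; the helpers below find the insertion point that preserves this
 * order.
 */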
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004067static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004068mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004069 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004070{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004071 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004072
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004073 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4074 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004075 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004076 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004077 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004078 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004080 if (fib4_entry->prio >= new4_entry->prio ||
4081 fib4_entry->tos < new4_entry->tos)
4082 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004083 }
4084
4085 return NULL;
4086}
4087
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004088static int
4089mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4090 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004091{
4092 struct mlxsw_sp_fib_node *fib_node;
4093
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004094 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004095 return -EINVAL;
4096
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004097 fib_node = fib4_entry->common.fib_node;
4098 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4099 common.list) {
4100 if (fib4_entry->tb_id != new4_entry->tb_id ||
4101 fib4_entry->tos != new4_entry->tos ||
4102 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004103 break;
4104 }
4105
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004106 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004107 return 0;
4108}
4109
Ido Schimmel9aecce12017-02-09 10:28:42 +01004110static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004111mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004112 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004113{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004114 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004115 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004116
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004117 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118
Ido Schimmel4283bce2017-02-09 10:28:43 +01004119 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004120 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4121 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004122 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004123
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004124 /* Insert the new entry before the replaced one, so that we can
4125 * later remove the latter.
4126 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004127 if (fib4_entry) {
4128 list_add_tail(&new4_entry->common.list,
4129 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004130 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004131 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004132
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004133 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4134 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004135 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004136 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004137 }
4138
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004139 if (fib4_entry)
4140 list_add(&new4_entry->common.list,
4141 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004142 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004143 list_add(&new4_entry->common.list,
4144 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004145 }
4146
4147 return 0;
4148}
4149
4150static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004151mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004152{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004153 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004154}
4155
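/* Only the first entry on a FIB node's entry list is reflected in the
 * device. Adding or deleting an entry therefore touches the hardware only
 * when that entry is, or becomes, the first one on the list; the previous
 * or next entry is then overwritten or promoted accordingly.
 */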
Ido Schimmel80c238f2017-07-18 10:10:29 +02004156static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4157 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004158{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004159 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4160
Ido Schimmel9aecce12017-02-09 10:28:42 +01004161 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4162 return 0;
4163
4164 /* To prevent packet loss, overwrite the previously offloaded
4165 * entry.
4166 */
4167 if (!list_is_singular(&fib_node->entry_list)) {
4168 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4169 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4170
4171 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4172 }
4173
4174 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4175}
4176
Ido Schimmel80c238f2017-07-18 10:10:29 +02004177static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4178 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004179{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004180 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4181
Ido Schimmel9aecce12017-02-09 10:28:42 +01004182 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4183 return;
4184
4185 /* Promote the next entry by overwriting the deleted entry */
4186 if (!list_is_singular(&fib_node->entry_list)) {
4187 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4188 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4189
4190 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4191 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4192 return;
4193 }
4194
4195 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4196}
4197
4198static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004199 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004200 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004201{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004202 int err;
4203
Ido Schimmel9efbee62017-07-18 10:10:28 +02004204 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004205 if (err)
4206 return err;
4207
Ido Schimmel80c238f2017-07-18 10:10:29 +02004208 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004209 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004210 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004211
Ido Schimmel9aecce12017-02-09 10:28:42 +01004212 return 0;
4213
Ido Schimmel80c238f2017-07-18 10:10:29 +02004214err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004215 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004216 return err;
4217}
4218
4219static void
4220mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004221 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004222{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004223 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004224 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004225
4226 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4227 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004228}
4229
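/* In a replace operation the new entry was linked immediately before the
 * entry it replaces, so the replaced entry is simply the next element on
 * the list: unlink it, destroy it and drop its reference on the FIB node.
 */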
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004230static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004231 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004232 bool replace)
4233{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004234 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4235 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004236
4237 if (!replace)
4238 return;
4239
 4240	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004241 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004242
4243 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4244 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004245 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004246}
4247
Ido Schimmel9aecce12017-02-09 10:28:42 +01004248static int
4249mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004250 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004251 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004252{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004253 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004254 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004255 int err;
4256
Ido Schimmel9011b672017-05-16 19:38:25 +02004257 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004258 return 0;
4259
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004260 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4261 &fen_info->dst, sizeof(fen_info->dst),
4262 fen_info->dst_len,
4263 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004264 if (IS_ERR(fib_node)) {
4265 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4266 return PTR_ERR(fib_node);
4267 }
4268
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004269 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4270 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004271 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004273 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004274 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004275
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004276 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004277 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004278 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004279 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4280 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004281 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004282
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004283 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004284
Jiri Pirko61c503f2016-07-04 08:23:11 +02004285 return 0;
4286
Ido Schimmel9aecce12017-02-09 10:28:42 +01004287err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004288 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004289err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004290 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004291 return err;
4292}
4293
Jiri Pirko37956d72016-10-20 16:05:43 +02004294static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4295 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004296{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004297 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004298 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004299
Ido Schimmel9011b672017-05-16 19:38:25 +02004300 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004301 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004302
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004303 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4304 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004305 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004306 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004307
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004308 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4309 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004310 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004311}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004312
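/* IPv6 routes are offloaded using the same FIB node / entry list scheme as
 * IPv4. A single mlxsw_sp_fib6_entry can represent a multipath route and
 * aggregates its sibling rt6_info routes, each wrapped in a struct
 * mlxsw_sp_rt6 that holds a reference on the kernel route.
 */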
Ido Schimmel428b8512017-08-03 13:28:28 +02004313static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4314{
 4315	/* Packets with a link-local destination IP arriving at the router
 4316	 * are trapped to the CPU, so there is no need to program specific
 4317	 * routes for them.
4318 */
4319 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4320 return true;
4321
4322 /* Multicast routes aren't supported, so ignore them. Neighbour
4323 * Discovery packets are specifically trapped.
4324 */
4325 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4326 return true;
4327
4328 /* Cloned routes are irrelevant in the forwarding path. */
4329 if (rt->rt6i_flags & RTF_CACHE)
4330 return true;
4331
4332 return false;
4333}
4334
4335static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4336{
4337 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4338
4339 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4340 if (!mlxsw_sp_rt6)
4341 return ERR_PTR(-ENOMEM);
4342
 4343	/* In case of route replacement, the replaced route is deleted
 4344	 * without notification. Take a reference to prevent accessing
 4345	 * freed memory.
4346 */
4347 mlxsw_sp_rt6->rt = rt;
4348 rt6_hold(rt);
4349
4350 return mlxsw_sp_rt6;
4351}
4352
4353#if IS_ENABLED(CONFIG_IPV6)
4354static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4355{
4356 rt6_release(rt);
4357}
4358#else
4359static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4360{
4361}
4362#endif
4363
4364static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4365{
4366 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4367 kfree(mlxsw_sp_rt6);
4368}
4369
4370static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4371{
4372 /* RTF_CACHE routes are ignored */
4373 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4374}
4375
4376static struct rt6_info *
4377mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4378{
4379 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4380 list)->rt;
4381}
4382
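/* Find an existing multipath-capable entry (same table, same metric and a
 * gateway route) that a new sibling route can be appended to. Nothing is
 * appended during replace, since the new route then replaces an existing
 * entry instead.
 */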
4383static struct mlxsw_sp_fib6_entry *
4384mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004385 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004386{
4387 struct mlxsw_sp_fib6_entry *fib6_entry;
4388
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004389 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004390 return NULL;
4391
4392 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4393 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4394
4395 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4396 * virtual router.
4397 */
4398 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4399 continue;
4400 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4401 break;
4402 if (rt->rt6i_metric < nrt->rt6i_metric)
4403 continue;
4404 if (rt->rt6i_metric == nrt->rt6i_metric &&
4405 mlxsw_sp_fib6_rt_can_mp(rt))
4406 return fib6_entry;
4407 if (rt->rt6i_metric > nrt->rt6i_metric)
4408 break;
4409 }
4410
4411 return NULL;
4412}
4413
4414static struct mlxsw_sp_rt6 *
4415mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4416 const struct rt6_info *rt)
4417{
4418 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4419
4420 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4421 if (mlxsw_sp_rt6->rt == rt)
4422 return mlxsw_sp_rt6;
4423 }
4424
4425 return NULL;
4426}
4427
Petr Machata8f28a302017-09-02 23:49:24 +02004428static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4429 const struct rt6_info *rt,
4430 enum mlxsw_sp_ipip_type *ret)
4431{
4432 return rt->dst.dev &&
4433 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4434}
4435
Petr Machata35225e42017-09-02 23:49:22 +02004436static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4437 struct mlxsw_sp_nexthop_group *nh_grp,
4438 struct mlxsw_sp_nexthop *nh,
4439 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004440{
Petr Machata8f28a302017-09-02 23:49:24 +02004441 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004442 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004443 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004444 struct mlxsw_sp_rif *rif;
4445 int err;
4446
Petr Machata8f28a302017-09-02 23:49:24 +02004447 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4448 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4449 MLXSW_SP_L3_PROTO_IPV6)) {
4450 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004451 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004452 if (err)
4453 return err;
4454 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4455 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004456 }
4457
Petr Machata35225e42017-09-02 23:49:22 +02004458 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004459 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4460 if (!rif)
4461 return 0;
4462 mlxsw_sp_nexthop_rif_init(nh, rif);
4463
4464 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4465 if (err)
4466 goto err_nexthop_neigh_init;
4467
4468 return 0;
4469
4470err_nexthop_neigh_init:
4471 mlxsw_sp_nexthop_rif_fini(nh);
4472 return err;
4473}
4474
Petr Machata35225e42017-09-02 23:49:22 +02004475static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4476 struct mlxsw_sp_nexthop *nh)
4477{
4478 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4479}
4480
4481static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4482 struct mlxsw_sp_nexthop_group *nh_grp,
4483 struct mlxsw_sp_nexthop *nh,
4484 const struct rt6_info *rt)
4485{
4486 struct net_device *dev = rt->dst.dev;
4487
4488 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004489 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004490 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004491 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004492
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004493 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4494
Petr Machata35225e42017-09-02 23:49:22 +02004495 if (!dev)
4496 return 0;
4497 nh->ifindex = dev->ifindex;
4498
4499 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4500}
4501
Ido Schimmel428b8512017-08-03 13:28:28 +02004502static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4503 struct mlxsw_sp_nexthop *nh)
4504{
Petr Machata35225e42017-09-02 23:49:22 +02004505 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004506 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004507 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004508}
4509
Petr Machataf6050ee2017-09-02 23:49:21 +02004510static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4511 const struct rt6_info *rt)
4512{
Petr Machata8f28a302017-09-02 23:49:24 +02004513 return rt->rt6i_flags & RTF_GATEWAY ||
4514 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004515}
4516
Ido Schimmel428b8512017-08-03 13:28:28 +02004517static struct mlxsw_sp_nexthop_group *
4518mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4519 struct mlxsw_sp_fib6_entry *fib6_entry)
4520{
4521 struct mlxsw_sp_nexthop_group *nh_grp;
4522 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4523 struct mlxsw_sp_nexthop *nh;
4524 size_t alloc_size;
4525 int i = 0;
4526 int err;
4527
4528 alloc_size = sizeof(*nh_grp) +
4529 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4530 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4531 if (!nh_grp)
4532 return ERR_PTR(-ENOMEM);
4533 INIT_LIST_HEAD(&nh_grp->fib_list);
4534#if IS_ENABLED(CONFIG_IPV6)
4535 nh_grp->neigh_tbl = &nd_tbl;
4536#endif
4537 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4538 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004539 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004540 nh_grp->count = fib6_entry->nrt6;
4541 for (i = 0; i < nh_grp->count; i++) {
4542 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4543
4544 nh = &nh_grp->nexthops[i];
4545 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4546 if (err)
4547 goto err_nexthop6_init;
4548 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4549 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004550
4551 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4552 if (err)
4553 goto err_nexthop_group_insert;
4554
Ido Schimmel428b8512017-08-03 13:28:28 +02004555 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4556 return nh_grp;
4557
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004558err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004559err_nexthop6_init:
4560 for (i--; i >= 0; i--) {
4561 nh = &nh_grp->nexthops[i];
4562 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4563 }
4564 kfree(nh_grp);
4565 return ERR_PTR(err);
4566}
4567
4568static void
4569mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4570 struct mlxsw_sp_nexthop_group *nh_grp)
4571{
4572 struct mlxsw_sp_nexthop *nh;
4573 int i = nh_grp->count;
4574
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004575 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004576 for (i--; i >= 0; i--) {
4577 nh = &nh_grp->nexthops[i];
4578 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4579 }
4580 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4581 WARN_ON(nh_grp->adj_index_valid);
4582 kfree(nh_grp);
4583}
4584
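/* Nexthop groups are shared between FIB entries: look up an existing group
 * whose nexthops match this entry and only create a new one if none exists.
 */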
4585static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4586 struct mlxsw_sp_fib6_entry *fib6_entry)
4587{
4588 struct mlxsw_sp_nexthop_group *nh_grp;
4589
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004590 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4591 if (!nh_grp) {
4592 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4593 if (IS_ERR(nh_grp))
4594 return PTR_ERR(nh_grp);
4595 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004596
4597 list_add_tail(&fib6_entry->common.nexthop_group_node,
4598 &nh_grp->fib_list);
4599 fib6_entry->common.nh_group = nh_grp;
4600
4601 return 0;
4602}
4603
4604static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4605 struct mlxsw_sp_fib_entry *fib_entry)
4606{
4607 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4608
4609 list_del(&fib_entry->nexthop_group_node);
4610 if (!list_empty(&nh_grp->fib_list))
4611 return;
4612 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4613}
4614
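/* Recompute the entry's nexthop group after its rt6 list changed. The new
 * group is taken and written to the device before the old group is
 * released, so the entry keeps pointing at a valid adjacency throughout.
 */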
4615static int
4616mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4617 struct mlxsw_sp_fib6_entry *fib6_entry)
4618{
4619 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4620 int err;
4621
4622 fib6_entry->common.nh_group = NULL;
4623 list_del(&fib6_entry->common.nexthop_group_node);
4624
4625 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4626 if (err)
4627 goto err_nexthop6_group_get;
4628
 4629	/* If this entry is offloaded, then the adjacency index currently
 4630	 * associated with it in the device's table is that of the old
 4631	 * group. Start using the new one instead.
4632 */
4633 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4634 if (err)
4635 goto err_fib_node_entry_add;
4636
4637 if (list_empty(&old_nh_grp->fib_list))
4638 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4639
4640 return 0;
4641
4642err_fib_node_entry_add:
4643 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4644err_nexthop6_group_get:
4645 list_add_tail(&fib6_entry->common.nexthop_group_node,
4646 &old_nh_grp->fib_list);
4647 fib6_entry->common.nh_group = old_nh_grp;
4648 return err;
4649}
4650
4651static int
4652mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4653 struct mlxsw_sp_fib6_entry *fib6_entry,
4654 struct rt6_info *rt)
4655{
4656 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4657 int err;
4658
4659 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4660 if (IS_ERR(mlxsw_sp_rt6))
4661 return PTR_ERR(mlxsw_sp_rt6);
4662
4663 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4664 fib6_entry->nrt6++;
4665
4666 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4667 if (err)
4668 goto err_nexthop6_group_update;
4669
4670 return 0;
4671
4672err_nexthop6_group_update:
4673 fib6_entry->nrt6--;
4674 list_del(&mlxsw_sp_rt6->list);
4675 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4676 return err;
4677}
4678
4679static void
4680mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4681 struct mlxsw_sp_fib6_entry *fib6_entry,
4682 struct rt6_info *rt)
4683{
4684 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4685
4686 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4687 if (WARN_ON(!mlxsw_sp_rt6))
4688 return;
4689
4690 fib6_entry->nrt6--;
4691 list_del(&mlxsw_sp_rt6->list);
4692 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4693 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4694}
4695
Petr Machataf6050ee2017-09-02 23:49:21 +02004696static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4697 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004698 const struct rt6_info *rt)
4699{
4700 /* Packets hitting RTF_REJECT routes need to be discarded by the
4701 * stack. We can rely on their destination device not having a
4702 * RIF (it's the loopback device) and can thus use action type
4703 * local, which will cause them to be trapped with a lower
4704 * priority than packets that need to be locally received.
4705 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004706 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004707 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4708 else if (rt->rt6i_flags & RTF_REJECT)
4709 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004710 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004711 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4712 else
4713 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4714}
4715
4716static void
4717mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4718{
4719 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4720
4721 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4722 list) {
4723 fib6_entry->nrt6--;
4724 list_del(&mlxsw_sp_rt6->list);
4725 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4726 }
4727}
4728
4729static struct mlxsw_sp_fib6_entry *
4730mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4731 struct mlxsw_sp_fib_node *fib_node,
4732 struct rt6_info *rt)
4733{
4734 struct mlxsw_sp_fib6_entry *fib6_entry;
4735 struct mlxsw_sp_fib_entry *fib_entry;
4736 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4737 int err;
4738
4739 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4740 if (!fib6_entry)
4741 return ERR_PTR(-ENOMEM);
4742 fib_entry = &fib6_entry->common;
4743
4744 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4745 if (IS_ERR(mlxsw_sp_rt6)) {
4746 err = PTR_ERR(mlxsw_sp_rt6);
4747 goto err_rt6_create;
4748 }
4749
Petr Machataf6050ee2017-09-02 23:49:21 +02004750 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004751
4752 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4753 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4754 fib6_entry->nrt6 = 1;
4755 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4756 if (err)
4757 goto err_nexthop6_group_get;
4758
4759 fib_entry->fib_node = fib_node;
4760
4761 return fib6_entry;
4762
4763err_nexthop6_group_get:
4764 list_del(&mlxsw_sp_rt6->list);
4765 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4766err_rt6_create:
4767 kfree(fib6_entry);
4768 return ERR_PTR(err);
4769}
4770
4771static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4772 struct mlxsw_sp_fib6_entry *fib6_entry)
4773{
4774 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4775 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4776 WARN_ON(fib6_entry->nrt6);
4777 kfree(fib6_entry);
4778}
4779
4780static struct mlxsw_sp_fib6_entry *
4781mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004782 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004783{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004784 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004785
4786 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4787 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4788
4789 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4790 continue;
4791 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4792 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004793 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4794 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4795 mlxsw_sp_fib6_rt_can_mp(nrt))
4796 return fib6_entry;
4797 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4798 fallback = fallback ?: fib6_entry;
4799 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004800 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004801 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004802 }
4803
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004804 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004805}
4806
4807static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004808mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4809 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004810{
4811 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4812 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4813 struct mlxsw_sp_fib6_entry *fib6_entry;
4814
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004815 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4816
4817 if (replace && WARN_ON(!fib6_entry))
4818 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004819
4820 if (fib6_entry) {
4821 list_add_tail(&new6_entry->common.list,
4822 &fib6_entry->common.list);
4823 } else {
4824 struct mlxsw_sp_fib6_entry *last;
4825
4826 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4827 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4828
4829 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4830 break;
4831 fib6_entry = last;
4832 }
4833
4834 if (fib6_entry)
4835 list_add(&new6_entry->common.list,
4836 &fib6_entry->common.list);
4837 else
4838 list_add(&new6_entry->common.list,
4839 &fib_node->entry_list);
4840 }
4841
4842 return 0;
4843}
4844
4845static void
4846mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4847{
4848 list_del(&fib6_entry->common.list);
4849}
4850
4851static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004852 struct mlxsw_sp_fib6_entry *fib6_entry,
4853 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004854{
4855 int err;
4856
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004857 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004858 if (err)
4859 return err;
4860
4861 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4862 if (err)
4863 goto err_fib_node_entry_add;
4864
4865 return 0;
4866
4867err_fib_node_entry_add:
4868 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4869 return err;
4870}
4871
4872static void
4873mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4874 struct mlxsw_sp_fib6_entry *fib6_entry)
4875{
4876 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4877 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4878}
4879
4880static struct mlxsw_sp_fib6_entry *
4881mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4882 const struct rt6_info *rt)
4883{
4884 struct mlxsw_sp_fib6_entry *fib6_entry;
4885 struct mlxsw_sp_fib_node *fib_node;
4886 struct mlxsw_sp_fib *fib;
4887 struct mlxsw_sp_vr *vr;
4888
4889 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4890 if (!vr)
4891 return NULL;
4892 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4893
4894 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4895 sizeof(rt->rt6i_dst.addr),
4896 rt->rt6i_dst.plen);
4897 if (!fib_node)
4898 return NULL;
4899
4900 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4901 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4902
4903 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4904 rt->rt6i_metric == iter_rt->rt6i_metric &&
4905 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4906 return fib6_entry;
4907 }
4908
4909 return NULL;
4910}
4911
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004912static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4913 struct mlxsw_sp_fib6_entry *fib6_entry,
4914 bool replace)
4915{
4916 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4917 struct mlxsw_sp_fib6_entry *replaced;
4918
4919 if (!replace)
4920 return;
4921
4922 replaced = list_next_entry(fib6_entry, common.list);
4923
4924 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4925 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4926 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4927}
4928
Ido Schimmel428b8512017-08-03 13:28:28 +02004929static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004930 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004931{
4932 struct mlxsw_sp_fib6_entry *fib6_entry;
4933 struct mlxsw_sp_fib_node *fib_node;
4934 int err;
4935
4936 if (mlxsw_sp->router->aborted)
4937 return 0;
4938
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004939 if (rt->rt6i_src.plen)
4940 return -EINVAL;
4941
Ido Schimmel428b8512017-08-03 13:28:28 +02004942 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4943 return 0;
4944
4945 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4946 &rt->rt6i_dst.addr,
4947 sizeof(rt->rt6i_dst.addr),
4948 rt->rt6i_dst.plen,
4949 MLXSW_SP_L3_PROTO_IPV6);
4950 if (IS_ERR(fib_node))
4951 return PTR_ERR(fib_node);
4952
4953 /* Before creating a new entry, try to append route to an existing
4954 * multipath entry.
4955 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004956 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004957 if (fib6_entry) {
4958 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4959 if (err)
4960 goto err_fib6_entry_nexthop_add;
4961 return 0;
4962 }
4963
4964 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4965 if (IS_ERR(fib6_entry)) {
4966 err = PTR_ERR(fib6_entry);
4967 goto err_fib6_entry_create;
4968 }
4969
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004970 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004971 if (err)
4972 goto err_fib6_node_entry_link;
4973
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004974 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4975
Ido Schimmel428b8512017-08-03 13:28:28 +02004976 return 0;
4977
4978err_fib6_node_entry_link:
4979 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4980err_fib6_entry_create:
4981err_fib6_entry_nexthop_add:
4982 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4983 return err;
4984}
4985
4986static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4987 struct rt6_info *rt)
4988{
4989 struct mlxsw_sp_fib6_entry *fib6_entry;
4990 struct mlxsw_sp_fib_node *fib_node;
4991
4992 if (mlxsw_sp->router->aborted)
4993 return;
4994
4995 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4996 return;
4997
4998 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4999 if (WARN_ON(!fib6_entry))
5000 return;
5001
 5002	/* If the route is part of a multipath entry, but is not the last
 5003	 * one removed, then only reduce its nexthop group.
5004 */
5005 if (!list_is_singular(&fib6_entry->rt6_list)) {
5006 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5007 return;
5008 }
5009
5010 fib_node = fib6_entry->common.fib_node;
5011
5012 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5013 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5014 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5015}
5016
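/* Used when aborting FIB offload: allocate a minimal LPM tree, bind every
 * virtual router to it and program a default catch-all entry whose action
 * is to trap packets to the CPU, so that routing is handled by the kernel
 * from this point on.
 */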
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005017static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5018 enum mlxsw_reg_ralxx_protocol proto,
5019 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005020{
5021 char ralta_pl[MLXSW_REG_RALTA_LEN];
5022 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005023 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005024
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005025 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005026 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5027 if (err)
5028 return err;
5029
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005030 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005031 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5032 if (err)
5033 return err;
5034
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005035 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005036 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005037 char raltb_pl[MLXSW_REG_RALTB_LEN];
5038 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005039
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005040 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005041 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5042 raltb_pl);
5043 if (err)
5044 return err;
5045
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005046 mlxsw_reg_ralue_pack(ralue_pl, proto,
5047 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005048 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5049 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5050 ralue_pl);
5051 if (err)
5052 return err;
5053 }
5054
5055 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005056}
5057
Yotam Gigid42b0962017-09-27 08:23:20 +02005058static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5059 struct mfc_entry_notifier_info *men_info,
5060 bool replace)
5061{
5062 struct mlxsw_sp_vr *vr;
5063
5064 if (mlxsw_sp->router->aborted)
5065 return 0;
5066
David Ahernf8fa9b42017-10-18 09:56:56 -07005067 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005068 if (IS_ERR(vr))
5069 return PTR_ERR(vr);
5070
5071 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5072}
5073
5074static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5075 struct mfc_entry_notifier_info *men_info)
5076{
5077 struct mlxsw_sp_vr *vr;
5078
5079 if (mlxsw_sp->router->aborted)
5080 return;
5081
5082 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5083 if (WARN_ON(!vr))
5084 return;
5085
5086 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5087 mlxsw_sp_vr_put(vr);
5088}
5089
5090static int
5091mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5092 struct vif_entry_notifier_info *ven_info)
5093{
5094 struct mlxsw_sp_rif *rif;
5095 struct mlxsw_sp_vr *vr;
5096
5097 if (mlxsw_sp->router->aborted)
5098 return 0;
5099
David Ahernf8fa9b42017-10-18 09:56:56 -07005100 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005101 if (IS_ERR(vr))
5102 return PTR_ERR(vr);
5103
5104 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5105 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5106 ven_info->vif_index,
5107 ven_info->vif_flags, rif);
5108}
5109
5110static void
5111mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5112 struct vif_entry_notifier_info *ven_info)
5113{
5114 struct mlxsw_sp_vr *vr;
5115
5116 if (mlxsw_sp->router->aborted)
5117 return;
5118
5119 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5120 if (WARN_ON(!vr))
5121 return;
5122
5123 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5124 mlxsw_sp_vr_put(vr);
5125}
5126
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005127static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5128{
5129 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5130 int err;
5131
5132 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5133 MLXSW_SP_LPM_TREE_MIN);
5134 if (err)
5135 return err;
5136
Yotam Gigid42b0962017-09-27 08:23:20 +02005137	/* The multicast router code does not need an abort trap since, by
 5138	 * default, packets that do not match any routes are trapped to the CPU.
5139 */
5140
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005141 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5142 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5143 MLXSW_SP_LPM_TREE_MIN + 1);
5144}
5145
Ido Schimmel9aecce12017-02-09 10:28:42 +01005146static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5147 struct mlxsw_sp_fib_node *fib_node)
5148{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005149 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005150
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005151 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5152 common.list) {
5153 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005154
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005155 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5156 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005157 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005158 /* Break when entry list is empty and node was freed.
5159 * Otherwise, we'll access freed memory in the next
5160 * iteration.
5161 */
5162 if (do_break)
5163 break;
5164 }
5165}
5166
Ido Schimmel428b8512017-08-03 13:28:28 +02005167static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5168 struct mlxsw_sp_fib_node *fib_node)
5169{
5170 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5171
5172 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5173 common.list) {
5174 bool do_break = &tmp->common.list == &fib_node->entry_list;
5175
5176 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5177 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5178 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5179 if (do_break)
5180 break;
5181 }
5182}
5183
Ido Schimmel9aecce12017-02-09 10:28:42 +01005184static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5185 struct mlxsw_sp_fib_node *fib_node)
5186{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005187 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005188 case MLXSW_SP_L3_PROTO_IPV4:
5189 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5190 break;
5191 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005192 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005193 break;
5194 }
5195}
5196
Ido Schimmel76610eb2017-03-10 08:53:41 +01005197static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5198 struct mlxsw_sp_vr *vr,
5199 enum mlxsw_sp_l3proto proto)
5200{
5201 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5202 struct mlxsw_sp_fib_node *fib_node, *tmp;
5203
5204 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5205 bool do_break = &tmp->list == &fib->node_list;
5206
5207 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5208 if (do_break)
5209 break;
5210 }
5211}
5212
Ido Schimmelac571de2016-11-14 11:26:32 +01005213static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005214{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005215 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005216
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005217 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005218 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005219
Ido Schimmel76610eb2017-03-10 08:53:41 +01005220 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005221 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005222
5223 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005224 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005225
5226 /* If virtual router was only used for IPv4, then it's no
5227 * longer used.
5228 */
5229 if (!mlxsw_sp_vr_is_used(vr))
5230 continue;
5231 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005232 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005233}
5234
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005235static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005236{
5237 int err;
5238
Ido Schimmel9011b672017-05-16 19:38:25 +02005239 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005240 return;
5241 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005242 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005243 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005244 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5245 if (err)
5246 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5247}
5248
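/* FIB notifications are delivered in atomic context, so the notifier block
 * copies the relevant notifier info into this work item, taking references
 * on objects such as the fib_info or rt6_info, and processing continues in
 * process context under RTNL.
 */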
Ido Schimmel30572242016-12-03 16:45:01 +01005249struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005250 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005251 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005252 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005253 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005254 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005255 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005256 struct mfc_entry_notifier_info men_info;
5257 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005258 };
Ido Schimmel30572242016-12-03 16:45:01 +01005259 struct mlxsw_sp *mlxsw_sp;
5260 unsigned long event;
5261};
5262
Ido Schimmel66a57632017-08-03 13:28:26 +02005263static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005264{
Ido Schimmel30572242016-12-03 16:45:01 +01005265 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005266 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005267 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005268 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005269 int err;
5270
Ido Schimmel30572242016-12-03 16:45:01 +01005271 /* Protect internal structures from changes */
5272 rtnl_lock();
5273 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005274 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005275 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005276 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005277 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005278 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5279 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005280 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005281 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005282 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005283 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005284 break;
5285 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005286 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5287 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005288 break;
David Ahern1f279232017-10-27 17:37:14 -07005289 case FIB_EVENT_RULE_ADD:
 5290	/* If we get here, a rule that we do not support was added, so
 5291	 * abort FIB offloading.
5292 */
5293 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005294 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005295 case FIB_EVENT_NH_ADD: /* fall through */
5296 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005297 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5298 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005299 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5300 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005301 }
Ido Schimmel30572242016-12-03 16:45:01 +01005302 rtnl_unlock();
5303 kfree(fib_work);
5304}
5305
Ido Schimmel66a57632017-08-03 13:28:26 +02005306static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5307{
Ido Schimmel583419f2017-08-03 13:28:27 +02005308 struct mlxsw_sp_fib_event_work *fib_work =
5309 container_of(work, struct mlxsw_sp_fib_event_work, work);
5310 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005311 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005312 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005313
5314 rtnl_lock();
5315 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005316 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005317 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005318 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005319 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005320 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005321 if (err)
5322 mlxsw_sp_router_fib_abort(mlxsw_sp);
5323 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5324 break;
5325 case FIB_EVENT_ENTRY_DEL:
5326 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5327 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5328 break;
David Ahern1f279232017-10-27 17:37:14 -07005329 case FIB_EVENT_RULE_ADD:
 5330	/* If we get here, a rule that we do not support was added, so
 5331	 * abort FIB offloading.
5332 */
5333 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005334 break;
5335 }
5336 rtnl_unlock();
5337 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005338}
5339
Yotam Gigid42b0962017-09-27 08:23:20 +02005340static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5341{
5342 struct mlxsw_sp_fib_event_work *fib_work =
5343 container_of(work, struct mlxsw_sp_fib_event_work, work);
5344 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005345 bool replace;
5346 int err;
5347
5348 rtnl_lock();
5349 switch (fib_work->event) {
5350 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5351 case FIB_EVENT_ENTRY_ADD:
5352 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5353
5354 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5355 replace);
5356 if (err)
5357 mlxsw_sp_router_fib_abort(mlxsw_sp);
5358 ipmr_cache_put(fib_work->men_info.mfc);
5359 break;
5360 case FIB_EVENT_ENTRY_DEL:
5361 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5362 ipmr_cache_put(fib_work->men_info.mfc);
5363 break;
5364 case FIB_EVENT_VIF_ADD:
5365 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5366 &fib_work->ven_info);
5367 if (err)
5368 mlxsw_sp_router_fib_abort(mlxsw_sp);
5369 dev_put(fib_work->ven_info.dev);
5370 break;
5371 case FIB_EVENT_VIF_DEL:
5372 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5373 &fib_work->ven_info);
5374 dev_put(fib_work->ven_info.dev);
5375 break;
David Ahern1f279232017-10-27 17:37:14 -07005376 case FIB_EVENT_RULE_ADD:
 5377	/* If we get here, a rule that we do not support was added, so
 5378	 * abort FIB offloading.
5379 */
5380 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005381 break;
5382 }
5383 rtnl_unlock();
5384 kfree(fib_work);
5385}
5386
Ido Schimmel66a57632017-08-03 13:28:26 +02005387static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5388 struct fib_notifier_info *info)
5389{
David Ahern3c75f9b2017-10-18 15:01:38 -07005390 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005391 struct fib_nh_notifier_info *fnh_info;
5392
Ido Schimmel66a57632017-08-03 13:28:26 +02005393 switch (fib_work->event) {
5394 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5395 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5396 case FIB_EVENT_ENTRY_ADD: /* fall through */
5397 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005398 fen_info = container_of(info, struct fib_entry_notifier_info,
5399 info);
5400 fib_work->fen_info = *fen_info;
5401 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005402 * freed while work is queued. Release it afterwards.
5403 */
5404 fib_info_hold(fib_work->fen_info.fi);
5405 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005406 case FIB_EVENT_NH_ADD: /* fall through */
5407 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005408 fnh_info = container_of(info, struct fib_nh_notifier_info,
5409 info);
5410 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005411 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5412 break;
5413 }
5414}
5415
5416static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5417 struct fib_notifier_info *info)
5418{
David Ahern3c75f9b2017-10-18 15:01:38 -07005419 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005420
Ido Schimmel583419f2017-08-03 13:28:27 +02005421 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005422 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005423 case FIB_EVENT_ENTRY_ADD: /* fall through */
5424 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005425 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5426 info);
5427 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005428 rt6_hold(fib_work->fen6_info.rt);
5429 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005430 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005431}
5432
Yotam Gigid42b0962017-09-27 08:23:20 +02005433static void
5434mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5435 struct fib_notifier_info *info)
5436{
5437 switch (fib_work->event) {
5438 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5439 case FIB_EVENT_ENTRY_ADD: /* fall through */
5440 case FIB_EVENT_ENTRY_DEL:
5441 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5442 ipmr_cache_hold(fib_work->men_info.mfc);
5443 break;
5444 case FIB_EVENT_VIF_ADD: /* fall through */
5445 case FIB_EVENT_VIF_DEL:
5446 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5447 dev_hold(fib_work->ven_info.dev);
5448 break;
David Ahern1f279232017-10-27 17:37:14 -07005449 }
5450}
5451
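/* Rule events are checked synchronously in the notifier block. Only the
 * default FIB rules and l3mdev rules are supported; adding any other rule
 * causes work to be queued that aborts FIB offloading.
 */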
5452static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5453 struct fib_notifier_info *info,
5454 struct mlxsw_sp *mlxsw_sp)
5455{
5456 struct netlink_ext_ack *extack = info->extack;
5457 struct fib_rule_notifier_info *fr_info;
5458 struct fib_rule *rule;
5459 int err = 0;
5460
5461 /* nothing to do at the moment */
5462 if (event == FIB_EVENT_RULE_DEL)
5463 return 0;
5464
5465 if (mlxsw_sp->router->aborted)
5466 return 0;
5467
5468 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5469 rule = fr_info->rule;
5470
5471 switch (info->family) {
5472 case AF_INET:
5473 if (!fib4_rule_default(rule) && !rule->l3mdev)
5474 err = -1;
5475 break;
5476 case AF_INET6:
5477 if (!fib6_rule_default(rule) && !rule->l3mdev)
5478 err = -1;
5479 break;
5480 case RTNL_FAMILY_IPMR:
5481 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5482 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005483 break;
5484 }
David Ahern1f279232017-10-27 17:37:14 -07005485
5486 if (err < 0)
5487 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5488
5489 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005490}
5491
Ido Schimmel30572242016-12-03 16:45:01 +01005492/* Called with rcu_read_lock() */
5493static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5494 unsigned long event, void *ptr)
5495{
Ido Schimmel30572242016-12-03 16:45:01 +01005496 struct mlxsw_sp_fib_event_work *fib_work;
5497 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005498 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005499 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005500
Ido Schimmel8e29f972017-09-15 15:31:07 +02005501 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005502 (info->family != AF_INET && info->family != AF_INET6 &&
5503 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005504 return NOTIFY_DONE;
5505
David Ahern1f279232017-10-27 17:37:14 -07005506 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5507
5508 switch (event) {
5509 case FIB_EVENT_RULE_ADD: /* fall through */
5510 case FIB_EVENT_RULE_DEL:
5511 err = mlxsw_sp_router_fib_rule_event(event, info,
5512 router->mlxsw_sp);
5513 if (!err)
5514 return NOTIFY_DONE;
5515 }
5516
Ido Schimmel30572242016-12-03 16:45:01 +01005517 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5518 if (WARN_ON(!fib_work))
5519 return NOTIFY_BAD;
5520
Ido Schimmel7e39d112017-05-16 19:38:28 +02005521 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005522 fib_work->event = event;
5523
Ido Schimmel66a57632017-08-03 13:28:26 +02005524 switch (info->family) {
5525 case AF_INET:
5526 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5527 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005528 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005529 case AF_INET6:
5530 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5531 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005532 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005533 case RTNL_FAMILY_IPMR:
5534 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5535 mlxsw_sp_router_fibmr_event(fib_work, info);
5536 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005537 }
5538
Ido Schimmela0e47612017-02-06 16:20:10 +01005539 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005540
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005541 return NOTIFY_DONE;
5542}
5543
Ido Schimmel4724ba562017-03-10 08:53:39 +01005544static struct mlxsw_sp_rif *
5545mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5546 const struct net_device *dev)
5547{
5548 int i;
5549
5550 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005551 if (mlxsw_sp->router->rifs[i] &&
5552 mlxsw_sp->router->rifs[i]->dev == dev)
5553 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005554
5555 return NULL;
5556}
5557
5558static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5559{
5560 char ritr_pl[MLXSW_REG_RITR_LEN];
5561 int err;
5562
5563 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5564 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5565 if (WARN_ON_ONCE(err))
5566 return err;
5567
5568 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5569 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5570}
5571
5572static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005573 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005574{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005575 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5576 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5577 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005578}
5579
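/* Decide whether an address event should (re)configure a RIF: NETDEV_UP
 * creates a RIF only if one does not already exist, while NETDEV_DOWN
 * destroys it only once the netdev has neither IPv4 nor IPv6 addresses
 * left and is not enslaved to an L3 master device.
 */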
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005580static bool
5581mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5582 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005583{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005584 struct inet6_dev *inet6_dev;
5585 bool addr_list_empty = true;
5586 struct in_device *idev;
5587
Ido Schimmel4724ba562017-03-10 08:53:39 +01005588 switch (event) {
5589 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005590 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005591 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005592 idev = __in_dev_get_rtnl(dev);
5593 if (idev && idev->ifa_list)
5594 addr_list_empty = false;
5595
5596 inet6_dev = __in6_dev_get(dev);
5597 if (addr_list_empty && inet6_dev &&
5598 !list_empty(&inet6_dev->addr_list))
5599 addr_list_empty = false;
5600
5601 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005602 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005603 return true;
5604 /* It is possible we already removed the RIF ourselves
5605 * if it was assigned to a netdev that is now a bridge
5606 * or LAG slave.
5607 */
5608 return false;
5609 }
5610
5611 return false;
5612}
5613
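/* Derive the RIF type for a netdev: IP-in-IP tunnels use loopback RIFs.
 * Otherwise the type follows the underlying FID - 802.1Q for VLAN uppers
 * of bridges and for VLAN-aware bridges, 802.1D for VLAN-unaware bridges
 * and an rFID for everything else (ports, LAGs and their VLAN uppers).
 */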
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005614static enum mlxsw_sp_rif_type
5615mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5616 const struct net_device *dev)
5617{
5618 enum mlxsw_sp_fid_type type;
5619
Petr Machata6ddb7422017-09-02 23:49:19 +02005620 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5621 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5622
5623 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005624 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5625 type = MLXSW_SP_FID_TYPE_8021Q;
5626 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5627 type = MLXSW_SP_FID_TYPE_8021Q;
5628 else if (netif_is_bridge_master(dev))
5629 type = MLXSW_SP_FID_TYPE_8021D;
5630 else
5631 type = MLXSW_SP_FID_TYPE_RFID;
5632
5633 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5634}
5635
Ido Schimmelde5ed992017-06-04 16:53:40 +02005636static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005637{
5638 int i;
5639
Ido Schimmelde5ed992017-06-04 16:53:40 +02005640 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5641 if (!mlxsw_sp->router->rifs[i]) {
5642 *p_rif_index = i;
5643 return 0;
5644 }
5645 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005646
Ido Schimmelde5ed992017-06-04 16:53:40 +02005647 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005648}
5649
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005650static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5651 u16 vr_id,
5652 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005653{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005654 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005655
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005656 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005657 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005658 return NULL;
5659
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005660 INIT_LIST_HEAD(&rif->nexthop_list);
5661 INIT_LIST_HEAD(&rif->neigh_list);
5662 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5663 rif->mtu = l3_dev->mtu;
5664 rif->vr_id = vr_id;
5665 rif->dev = l3_dev;
5666 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005667
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005668 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005669}
5670
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005671struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5672 u16 rif_index)
5673{
5674 return mlxsw_sp->router->rifs[rif_index];
5675}
5676
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005677u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5678{
5679 return rif->rif_index;
5680}
5681
Petr Machata92107cf2017-09-02 23:49:28 +02005682u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5683{
5684 return lb_rif->common.rif_index;
5685}
5686
5687u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5688{
5689 return lb_rif->ul_vr_id;
5690}
5691
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005692int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5693{
5694 return rif->dev->ifindex;
5695}
5696
Yotam Gigi91e4d592017-09-19 10:00:19 +02005697const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5698{
5699 return rif->dev;
5700}
5701
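/* Create a RIF for an L3 netdev: bind a virtual router for the netdev's
 * FIB table, allocate a free RIF index and a type-specific RIF structure,
 * take a reference on the backing FID (when the RIF type has one), program
 * the RIF via the type-specific configure() callback and register it with
 * the multicast router.
 */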
Ido Schimmel4724ba562017-03-10 08:53:39 +01005702static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005703mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005704 const struct mlxsw_sp_rif_params *params,
5705 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005706{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005707 u32 tb_id = l3mdev_fib_table(params->dev);
5708 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005709 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005710 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005711 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005712 struct mlxsw_sp_vr *vr;
5713 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005714 int err;
5715
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005716 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5717 ops = mlxsw_sp->router->rif_ops_arr[type];
5718
David Ahernf8fa9b42017-10-18 09:56:56 -07005719 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005720 if (IS_ERR(vr))
5721 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005722 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005723
Ido Schimmelde5ed992017-06-04 16:53:40 +02005724 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005725 if (err) {
5726 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005727 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005728 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005729
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005730 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005731 if (!rif) {
5732 err = -ENOMEM;
5733 goto err_rif_alloc;
5734 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005735 rif->mlxsw_sp = mlxsw_sp;
5736 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005737
Petr Machata010cadf2017-09-02 23:49:18 +02005738 if (ops->fid_get) {
5739 fid = ops->fid_get(rif);
5740 if (IS_ERR(fid)) {
5741 err = PTR_ERR(fid);
5742 goto err_fid_get;
5743 }
5744 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005745 }
5746
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005747 if (ops->setup)
5748 ops->setup(rif, params);
5749
5750 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005751 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005752 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005753
Yotam Gigid42b0962017-09-27 08:23:20 +02005754 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5755 if (err)
5756 goto err_mr_rif_add;
5757
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005758 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005759 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005760
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005761 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005762
Yotam Gigid42b0962017-09-27 08:23:20 +02005763err_mr_rif_add:
5764 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005765err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005766 if (fid)
5767 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005768err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005769 kfree(rif);
5770err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005771err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005772 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005773 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005774 return ERR_PTR(err);
5775}
5776
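/* Destroy a RIF, rolling back the creation sequence in reverse order:
 * flush the nexthops and neighbours using it, unregister it from the
 * multicast router, deconfigure it in the device, release the FID
 * reference and finally drop the virtual router reference.
 */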
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005777void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005778{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005779 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5780 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005781 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005782 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005783
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005784 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005785 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005786
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005787 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005788 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005789 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005790 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005791 if (fid)
5792 /* Loopback RIFs are not associated with a FID. */
5793 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005794 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005795 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005796 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005797}
5798
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005799static void
5800mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5801 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5802{
5803 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5804
5805 params->vid = mlxsw_sp_port_vlan->vid;
5806 params->lag = mlxsw_sp_port->lagged;
5807 if (params->lag)
5808 params->lag_id = mlxsw_sp_port->lag_id;
5809 else
5810 params->system_port = mlxsw_sp_port->local_port;
5811}
5812
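/* Join a {port, VLAN} to the router: create a sub-port RIF for the L3
 * netdev if one does not exist yet, map the {port, VID} to the RIF's FID,
 * disable learning for the VID and set it to forwarding.
 */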
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005813static int
Ido Schimmela1107482017-05-26 08:37:39 +02005814mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005815 struct net_device *l3_dev,
5816 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005817{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005818 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005819 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005820 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005821 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005822 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005823 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005824
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005825 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005826 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005827 struct mlxsw_sp_rif_params params = {
5828 .dev = l3_dev,
5829 };
5830
5831 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005832 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005833 if (IS_ERR(rif))
5834 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005835 }
5836
Ido Schimmela1107482017-05-26 08:37:39 +02005837 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005838 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005839 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5840 if (err)
5841 goto err_fid_port_vid_map;
5842
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005843 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005844 if (err)
5845 goto err_port_vid_learning_set;
5846
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005847 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005848 BR_STATE_FORWARDING);
5849 if (err)
5850 goto err_port_vid_stp_set;
5851
Ido Schimmela1107482017-05-26 08:37:39 +02005852 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005853
Ido Schimmel4724ba562017-03-10 08:53:39 +01005854 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005855
5856err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005857 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005858err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005859 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5860err_fid_port_vid_map:
5861 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005862 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005863}
5864
Ido Schimmela1107482017-05-26 08:37:39 +02005865void
5866mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005867{
Ido Schimmelce95e152017-05-26 08:37:27 +02005868 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005869 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005870 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005871
Ido Schimmela1107482017-05-26 08:37:39 +02005872 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5873 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005874
Ido Schimmela1107482017-05-26 08:37:39 +02005875 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005876 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5877 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005878 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5879 /* If router port holds the last reference on the rFID, then the
5880 * associated Sub-port RIF will be destroyed.
5881 */
5882 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005883}
5884
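/* Dispatch an address event on an L3 netdev to the underlying {port, VLAN}:
 * NETDEV_UP joins it to the router, NETDEV_DOWN makes it leave.
 */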
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005885static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5886 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005887 unsigned long event, u16 vid,
5888 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005889{
5890 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005891 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005892
Ido Schimmelce95e152017-05-26 08:37:27 +02005893 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005894 if (WARN_ON(!mlxsw_sp_port_vlan))
5895 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005896
5897 switch (event) {
5898 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005899 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005900 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005901 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005902 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005903 break;
5904 }
5905
5906 return 0;
5907}
5908
5909static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005910 unsigned long event,
5911 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005912{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005913 if (netif_is_bridge_port(port_dev) ||
5914 netif_is_lag_port(port_dev) ||
5915 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005916 return 0;
5917
David Ahernf8fa9b42017-10-18 09:56:56 -07005918 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5919 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005920}
5921
5922static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5923 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005924 unsigned long event, u16 vid,
5925 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005926{
5927 struct net_device *port_dev;
5928 struct list_head *iter;
5929 int err;
5930
5931 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5932 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005933 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5934 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005935 event, vid,
5936 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005937 if (err)
5938 return err;
5939 }
5940 }
5941
5942 return 0;
5943}
5944
5945static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005946 unsigned long event,
5947 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005948{
5949 if (netif_is_bridge_port(lag_dev))
5950 return 0;
5951
David Ahernf8fa9b42017-10-18 09:56:56 -07005952 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5953 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005954}
5955
Ido Schimmel4724ba562017-03-10 08:53:39 +01005956static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005957 unsigned long event,
5958 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005959{
5960 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005961 struct mlxsw_sp_rif_params params = {
5962 .dev = l3_dev,
5963 };
Ido Schimmela1107482017-05-26 08:37:39 +02005964 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005965
5966 switch (event) {
5967 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07005968 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005969 if (IS_ERR(rif))
5970 return PTR_ERR(rif);
5971 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005972 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005973 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005974 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005975 break;
5976 }
5977
5978 return 0;
5979}
5980
5981static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005982 unsigned long event,
5983 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005984{
5985 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005986 u16 vid = vlan_dev_vlan_id(vlan_dev);
5987
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03005988 if (netif_is_bridge_port(vlan_dev))
5989 return 0;
5990
Ido Schimmel4724ba562017-03-10 08:53:39 +01005991 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005992 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005993 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005994 else if (netif_is_lag_master(real_dev))
5995 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07005996 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02005997 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005998 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005999
6000 return 0;
6001}
6002
Ido Schimmelb1e45522017-04-30 19:47:14 +03006003static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006004 unsigned long event,
6005 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006006{
6007 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006008 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006009 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006010 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006011 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006012 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006013 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006014 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006015 else
6016 return 0;
6017}
6018
Ido Schimmel4724ba562017-03-10 08:53:39 +01006019int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6020 unsigned long event, void *ptr)
6021{
6022 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6023 struct net_device *dev = ifa->ifa_dev->dev;
6024 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006025 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006026 int err = 0;
6027
David Ahern89d5dd22017-10-18 09:56:55 -07006028 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6029 if (event == NETDEV_UP)
6030 goto out;
6031
6032 mlxsw_sp = mlxsw_sp_lower_get(dev);
6033 if (!mlxsw_sp)
6034 goto out;
6035
6036 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6037 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6038 goto out;
6039
David Ahernf8fa9b42017-10-18 09:56:56 -07006040 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006041out:
6042 return notifier_from_errno(err);
6043}
6044
6045int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6046 unsigned long event, void *ptr)
6047{
6048 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6049 struct net_device *dev = ivi->ivi_dev->dev;
6050 struct mlxsw_sp *mlxsw_sp;
6051 struct mlxsw_sp_rif *rif;
6052 int err = 0;
6053
Ido Schimmel4724ba562017-03-10 08:53:39 +01006054 mlxsw_sp = mlxsw_sp_lower_get(dev);
6055 if (!mlxsw_sp)
6056 goto out;
6057
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006058 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006059 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006060 goto out;
6061
David Ahernf8fa9b42017-10-18 09:56:56 -07006062 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006063out:
6064 return notifier_from_errno(err);
6065}
6066
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006067struct mlxsw_sp_inet6addr_event_work {
6068 struct work_struct work;
6069 struct net_device *dev;
6070 unsigned long event;
6071};
6072
6073static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6074{
6075 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6076 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6077 struct net_device *dev = inet6addr_work->dev;
6078 unsigned long event = inet6addr_work->event;
6079 struct mlxsw_sp *mlxsw_sp;
6080 struct mlxsw_sp_rif *rif;
6081
6082 rtnl_lock();
6083 mlxsw_sp = mlxsw_sp_lower_get(dev);
6084 if (!mlxsw_sp)
6085 goto out;
6086
6087 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6088 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6089 goto out;
6090
David Ahernf8fa9b42017-10-18 09:56:56 -07006091 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006092out:
6093 rtnl_unlock();
6094 dev_put(dev);
6095 kfree(inet6addr_work);
6096}
6097
6098/* Called with rcu_read_lock() */
6099int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6100 unsigned long event, void *ptr)
6101{
6102 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6103 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6104 struct net_device *dev = if6->idev->dev;
6105
David Ahern89d5dd22017-10-18 09:56:55 -07006106 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6107 if (event == NETDEV_UP)
6108 return NOTIFY_DONE;
6109
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006110 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6111 return NOTIFY_DONE;
6112
6113 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6114 if (!inet6addr_work)
6115 return NOTIFY_BAD;
6116
6117 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6118 inet6addr_work->dev = dev;
6119 inet6addr_work->event = event;
6120 dev_hold(dev);
6121 mlxsw_core_schedule_work(&inet6addr_work->work);
6122
6123 return NOTIFY_DONE;
6124}
6125
David Ahern89d5dd22017-10-18 09:56:55 -07006126int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6127 unsigned long event, void *ptr)
6128{
6129 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6130 struct net_device *dev = i6vi->i6vi_dev->dev;
6131 struct mlxsw_sp *mlxsw_sp;
6132 struct mlxsw_sp_rif *rif;
6133 int err = 0;
6134
6135 mlxsw_sp = mlxsw_sp_lower_get(dev);
6136 if (!mlxsw_sp)
6137 goto out;
6138
6139 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6140 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6141 goto out;
6142
David Ahernf8fa9b42017-10-18 09:56:56 -07006143 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006144out:
6145 return notifier_from_errno(err);
6146}
6147
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006148static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006149 const char *mac, int mtu)
6150{
6151 char ritr_pl[MLXSW_REG_RITR_LEN];
6152 int err;
6153
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006154 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006155 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6156 if (err)
6157 return err;
6158
6159 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6160 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6161 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6162 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6163}
6164
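/* Handle a MAC or MTU change on a netdev with a RIF: remove the FDB entry
 * for the old router MAC, re-program the RIF via RITR with the new MAC and
 * MTU, install an FDB entry for the new MAC and, if the MTU changed, let
 * the multicast router update its RIF as well.
 */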
6165int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6166{
6167 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006168 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006169 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006170 int err;
6171
6172 mlxsw_sp = mlxsw_sp_lower_get(dev);
6173 if (!mlxsw_sp)
6174 return 0;
6175
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006176 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6177 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006178 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006179 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006180
Ido Schimmela1107482017-05-26 08:37:39 +02006181 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006182 if (err)
6183 return err;
6184
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006185 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6186 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006187 if (err)
6188 goto err_rif_edit;
6189
Ido Schimmela1107482017-05-26 08:37:39 +02006190 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006191 if (err)
6192 goto err_rif_fdb_op;
6193
Yotam Gigifd890fe2017-09-27 08:23:21 +02006194 if (rif->mtu != dev->mtu) {
6195 struct mlxsw_sp_vr *vr;
6196
6197		/* The RIF is relevant only to its mr_table instance, as unlike
6198		 * in unicast routing, in multicast routing a RIF cannot be shared
6199 * between several multicast routing tables.
6200 */
6201 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6202 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6203 }
6204
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006205 ether_addr_copy(rif->addr, dev->dev_addr);
6206 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006207
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006208 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006209
6210 return 0;
6211
6212err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006213 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006214err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006215 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006216 return err;
6217}
6218
Ido Schimmelb1e45522017-04-30 19:47:14 +03006219static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006220 struct net_device *l3_dev,
6221 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006222{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006223 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006224
Ido Schimmelb1e45522017-04-30 19:47:14 +03006225 /* If netdev is already associated with a RIF, then we need to
6226 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006227 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006228 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6229 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006230 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006231
David Ahernf8fa9b42017-10-18 09:56:56 -07006232 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006233}
6234
Ido Schimmelb1e45522017-04-30 19:47:14 +03006235static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6236 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006237{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006238 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006239
Ido Schimmelb1e45522017-04-30 19:47:14 +03006240 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6241 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006242 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006243 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006244}
6245
Ido Schimmelb1e45522017-04-30 19:47:14 +03006246int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6247 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006248{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006249 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6250 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006251
Ido Schimmelb1e45522017-04-30 19:47:14 +03006252 if (!mlxsw_sp)
6253 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006254
Ido Schimmelb1e45522017-04-30 19:47:14 +03006255 switch (event) {
6256 case NETDEV_PRECHANGEUPPER:
6257 return 0;
6258 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006259 if (info->linking) {
6260 struct netlink_ext_ack *extack;
6261
6262 extack = netdev_notifier_info_to_extack(&info->info);
6263 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6264 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006265 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006266 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006267 break;
6268 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006269
Ido Schimmelb1e45522017-04-30 19:47:14 +03006270 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006271}
6272
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006273static struct mlxsw_sp_rif_subport *
6274mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006275{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006276 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006277}
6278
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006279static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6280 const struct mlxsw_sp_rif_params *params)
6281{
6282 struct mlxsw_sp_rif_subport *rif_subport;
6283
6284 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6285 rif_subport->vid = params->vid;
6286 rif_subport->lag = params->lag;
6287 if (params->lag)
6288 rif_subport->lag_id = params->lag_id;
6289 else
6290 rif_subport->system_port = params->system_port;
6291}
6292
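/* Program a sub-port RIF in the device: pack a sub-port RITR entry with the
 * RIF's MAC and MTU and the {system port | LAG, VID} it is bound to.
 */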
6293static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6294{
6295 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6296 struct mlxsw_sp_rif_subport *rif_subport;
6297 char ritr_pl[MLXSW_REG_RITR_LEN];
6298
6299 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6300 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006301 rif->rif_index, rif->vr_id, rif->dev->mtu);
6302 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006303 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6304 rif_subport->lag ? rif_subport->lag_id :
6305 rif_subport->system_port,
6306 rif_subport->vid);
6307
6308 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6309}
6310
6311static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6312{
Petr Machata010cadf2017-09-02 23:49:18 +02006313 int err;
6314
6315 err = mlxsw_sp_rif_subport_op(rif, true);
6316 if (err)
6317 return err;
6318
6319 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6320 mlxsw_sp_fid_index(rif->fid), true);
6321 if (err)
6322 goto err_rif_fdb_op;
6323
6324 mlxsw_sp_fid_rif_set(rif->fid, rif);
6325 return 0;
6326
6327err_rif_fdb_op:
6328 mlxsw_sp_rif_subport_op(rif, false);
6329 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006330}
6331
6332static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6333{
Petr Machata010cadf2017-09-02 23:49:18 +02006334 struct mlxsw_sp_fid *fid = rif->fid;
6335
6336 mlxsw_sp_fid_rif_set(fid, NULL);
6337 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6338 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006339 mlxsw_sp_rif_subport_op(rif, false);
6340}
6341
6342static struct mlxsw_sp_fid *
6343mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6344{
6345 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6346}
6347
6348static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6349 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6350 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6351 .setup = mlxsw_sp_rif_subport_setup,
6352 .configure = mlxsw_sp_rif_subport_configure,
6353 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6354 .fid_get = mlxsw_sp_rif_subport_fid_get,
6355};
6356
6357static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6358 enum mlxsw_reg_ritr_if_type type,
6359 u16 vid_fid, bool enable)
6360{
6361 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6362 char ritr_pl[MLXSW_REG_RITR_LEN];
6363
6364 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006365 rif->dev->mtu);
6366 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006367 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6368
6369 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6370}
6371
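/* The "router port" is a virtual local port one past the last front-panel
 * port. It is used below when updating a FID's BC/MC flood tables so that
 * flooded traffic is also delivered to the router.
 */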
Yotam Gigib35750f2017-10-09 11:15:33 +02006372u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006373{
6374 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6375}
6376
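/* Configure a VLAN RIF: program a VLAN interface in RITR, add the router
 * port to the FID's MC and BC flood tables and install an FDB entry for
 * the RIF's MAC so that packets with that address are handed to the router.
 */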
6377static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6378{
6379 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6380 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6381 int err;
6382
6383 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6384 if (err)
6385 return err;
6386
Ido Schimmel0d284812017-07-18 10:10:12 +02006387 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6388 mlxsw_sp_router_port(mlxsw_sp), true);
6389 if (err)
6390 goto err_fid_mc_flood_set;
6391
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006392 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6393 mlxsw_sp_router_port(mlxsw_sp), true);
6394 if (err)
6395 goto err_fid_bc_flood_set;
6396
Petr Machata010cadf2017-09-02 23:49:18 +02006397 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6398 mlxsw_sp_fid_index(rif->fid), true);
6399 if (err)
6400 goto err_rif_fdb_op;
6401
6402 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006403 return 0;
6404
Petr Machata010cadf2017-09-02 23:49:18 +02006405err_rif_fdb_op:
6406 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6407 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006408err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006409 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6410 mlxsw_sp_router_port(mlxsw_sp), false);
6411err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006412 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6413 return err;
6414}
6415
6416static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6417{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006418 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006419 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6420 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006421
Petr Machata010cadf2017-09-02 23:49:18 +02006422 mlxsw_sp_fid_rif_set(fid, NULL);
6423 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6424 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006425 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6426 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006427 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6428 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006429 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6430}
6431
6432static struct mlxsw_sp_fid *
6433mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6434{
6435 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6436
6437 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6438}
6439
6440static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6441 .type = MLXSW_SP_RIF_TYPE_VLAN,
6442 .rif_size = sizeof(struct mlxsw_sp_rif),
6443 .configure = mlxsw_sp_rif_vlan_configure,
6444 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6445 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6446};
6447
6448static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6449{
6450 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6451 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6452 int err;
6453
6454 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6455 true);
6456 if (err)
6457 return err;
6458
Ido Schimmel0d284812017-07-18 10:10:12 +02006459 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6460 mlxsw_sp_router_port(mlxsw_sp), true);
6461 if (err)
6462 goto err_fid_mc_flood_set;
6463
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006464 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6465 mlxsw_sp_router_port(mlxsw_sp), true);
6466 if (err)
6467 goto err_fid_bc_flood_set;
6468
Petr Machata010cadf2017-09-02 23:49:18 +02006469 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6470 mlxsw_sp_fid_index(rif->fid), true);
6471 if (err)
6472 goto err_rif_fdb_op;
6473
6474 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006475 return 0;
6476
Petr Machata010cadf2017-09-02 23:49:18 +02006477err_rif_fdb_op:
6478 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6479 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006480err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006481 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6482 mlxsw_sp_router_port(mlxsw_sp), false);
6483err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006484 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6485 return err;
6486}
6487
6488static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6489{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006490 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006491 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6492 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006493
Petr Machata010cadf2017-09-02 23:49:18 +02006494 mlxsw_sp_fid_rif_set(fid, NULL);
6495 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6496 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006497 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6498 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006499 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6500 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006501 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6502}
6503
6504static struct mlxsw_sp_fid *
6505mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6506{
6507 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6508}
6509
6510static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6511 .type = MLXSW_SP_RIF_TYPE_FID,
6512 .rif_size = sizeof(struct mlxsw_sp_rif),
6513 .configure = mlxsw_sp_rif_fid_configure,
6514 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6515 .fid_get = mlxsw_sp_rif_fid_fid_get,
6516};
6517
Petr Machata6ddb7422017-09-02 23:49:19 +02006518static struct mlxsw_sp_rif_ipip_lb *
6519mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6520{
6521 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6522}
6523
6524static void
6525mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6526 const struct mlxsw_sp_rif_params *params)
6527{
6528 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6529 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6530
6531 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6532 common);
6533 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6534 rif_lb->lb_config = params_lb->lb_config;
6535}
6536
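/* Program a loopback RIF backing an IP-in-IP tunnel: for an IPv4 underlay,
 * pack a loopback RITR entry with the tunnel's underlay VR, source address
 * and GRE key configuration. IPv6 underlays are not supported yet.
 */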
6537static int
6538mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6539 struct mlxsw_sp_vr *ul_vr, bool enable)
6540{
6541 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6542 struct mlxsw_sp_rif *rif = &lb_rif->common;
6543 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6544 char ritr_pl[MLXSW_REG_RITR_LEN];
6545 u32 saddr4;
6546
6547 switch (lb_cf.ul_protocol) {
6548 case MLXSW_SP_L3_PROTO_IPV4:
6549 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6550 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6551 rif->rif_index, rif->vr_id, rif->dev->mtu);
6552 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6553 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6554 ul_vr->id, saddr4, lb_cf.okey);
6555 break;
6556
6557 case MLXSW_SP_L3_PROTO_IPV6:
6558 return -EAFNOSUPPORT;
6559 }
6560
6561 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6562}
6563
6564static int
6565mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6566{
6567 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6568 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6569 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6570 struct mlxsw_sp_vr *ul_vr;
6571 int err;
6572
David Ahernf8fa9b42017-10-18 09:56:56 -07006573 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006574 if (IS_ERR(ul_vr))
6575 return PTR_ERR(ul_vr);
6576
6577 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6578 if (err)
6579 goto err_loopback_op;
6580
6581 lb_rif->ul_vr_id = ul_vr->id;
6582 ++ul_vr->rif_count;
6583 return 0;
6584
6585err_loopback_op:
6586 mlxsw_sp_vr_put(ul_vr);
6587 return err;
6588}
6589
6590static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6591{
6592 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6593 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6594 struct mlxsw_sp_vr *ul_vr;
6595
6596 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6597 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6598
6599 --ul_vr->rif_count;
6600 mlxsw_sp_vr_put(ul_vr);
6601}
6602
6603static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6604 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6605 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6606 .setup = mlxsw_sp_rif_ipip_lb_setup,
6607 .configure = mlxsw_sp_rif_ipip_lb_configure,
6608 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6609};
6610
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006611static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6612 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6613 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6614 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006615 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006616};
6617
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006618static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6619{
6620 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6621
6622 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6623 sizeof(struct mlxsw_sp_rif *),
6624 GFP_KERNEL);
6625 if (!mlxsw_sp->router->rifs)
6626 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006627
6628 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6629
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006630 return 0;
6631}
6632
6633static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6634{
6635 int i;
6636
6637 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6638 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6639
6640 kfree(mlxsw_sp->router->rifs);
6641}
6642
Petr Machatadcbda282017-10-20 09:16:16 +02006643static int
6644mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6645{
6646 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6647
6648 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6649 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6650}
6651
Petr Machata38ebc0f2017-09-02 23:49:17 +02006652static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6653{
6654 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006655 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006656 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006657}
6658
6659static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6660{
Petr Machata1012b9a2017-09-02 23:49:23 +02006661 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006662}
6663
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006664static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6665{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006666 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006667
6668 /* Flush pending FIB notifications and then flush the device's
6669 * table before requesting another dump. The FIB notification
6670 * block is unregistered, so no need to take RTNL.
6671 */
6672 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006673 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6674 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006675}
6676
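/* ECMP hash configuration: seed the hash with random bytes and enable the
 * relevant headers and fields via RECR2. IPv4 always hashes on SIP and DIP
 * and, when the kernel's fib_multipath_hash_policy requests an L4 hash,
 * also on the IP protocol and TCP/UDP ports. IPv6 hashes on SIP, DIP, flow
 * label and next header. Without CONFIG_IP_ROUTE_MULTIPATH this is a no-op.
 */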
Ido Schimmelaf658b62017-11-02 17:14:09 +01006677#ifdef CONFIG_IP_ROUTE_MULTIPATH
6678static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6679{
6680 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6681}
6682
6683static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6684{
6685 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6686}
6687
6688static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6689{
6690 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6691
6692 mlxsw_sp_mp_hash_header_set(recr2_pl,
6693 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6694 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6695 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6696 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6697 if (only_l3)
6698 return;
6699 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6700 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6701 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6702 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6703}
6704
6705static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6706{
6707 mlxsw_sp_mp_hash_header_set(recr2_pl,
6708 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6709 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6710 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6711 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6712 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6713 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6714}
6715
6716static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6717{
6718 char recr2_pl[MLXSW_REG_RECR2_LEN];
6719 u32 seed;
6720
6721 get_random_bytes(&seed, sizeof(seed));
6722 mlxsw_reg_recr2_pack(recr2_pl, seed);
6723 mlxsw_sp_mp4_hash_init(recr2_pl);
6724 mlxsw_sp_mp6_hash_init(recr2_pl);
6725
6726 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6727}
6728#else
6729static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6730{
6731 return 0;
6732}
6733#endif
6734
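/* Enable routing in the device via RGCR and cap the number of router
 * interfaces at the MAX_RIFS resource; __mlxsw_sp_router_fini() disables
 * it again.
 */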
Ido Schimmel4724ba562017-03-10 08:53:39 +01006735static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6736{
6737 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6738 u64 max_rifs;
6739 int err;
6740
6741 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6742 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006743 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006744
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006745 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006746 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6747 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6748 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006749 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006750 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006751}
6752
6753static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6754{
6755 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006756
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006757 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006758 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006759}
6760
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006761int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6762{
Ido Schimmel9011b672017-05-16 19:38:25 +02006763 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006764 int err;
6765
Ido Schimmel9011b672017-05-16 19:38:25 +02006766 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6767 if (!router)
6768 return -ENOMEM;
6769 mlxsw_sp->router = router;
6770 router->mlxsw_sp = mlxsw_sp;
6771
6772 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006773 err = __mlxsw_sp_router_init(mlxsw_sp);
6774 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006775 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006776
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006777 err = mlxsw_sp_rifs_init(mlxsw_sp);
6778 if (err)
6779 goto err_rifs_init;
6780
Petr Machata38ebc0f2017-09-02 23:49:17 +02006781 err = mlxsw_sp_ipips_init(mlxsw_sp);
6782 if (err)
6783 goto err_ipips_init;
6784
Ido Schimmel9011b672017-05-16 19:38:25 +02006785 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006786 &mlxsw_sp_nexthop_ht_params);
6787 if (err)
6788 goto err_nexthop_ht_init;
6789
Ido Schimmel9011b672017-05-16 19:38:25 +02006790 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006791 &mlxsw_sp_nexthop_group_ht_params);
6792 if (err)
6793 goto err_nexthop_group_ht_init;
6794
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006795 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006796 err = mlxsw_sp_lpm_init(mlxsw_sp);
6797 if (err)
6798 goto err_lpm_init;
6799
Yotam Gigid42b0962017-09-27 08:23:20 +02006800 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6801 if (err)
6802 goto err_mr_init;
6803
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006804 err = mlxsw_sp_vrs_init(mlxsw_sp);
6805 if (err)
6806 goto err_vrs_init;
6807
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006808 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006809 if (err)
6810 goto err_neigh_init;
6811
Ido Schimmel48fac882017-11-02 17:14:06 +01006812 mlxsw_sp->router->netevent_nb.notifier_call =
6813 mlxsw_sp_router_netevent_event;
6814 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6815 if (err)
6816 goto err_register_netevent_notifier;
6817
Ido Schimmelaf658b62017-11-02 17:14:09 +01006818 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
6819 if (err)
6820 goto err_mp_hash_init;
6821
Ido Schimmel7e39d112017-05-16 19:38:28 +02006822 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6823 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006824 mlxsw_sp_router_fib_dump_flush);
6825 if (err)
6826 goto err_register_fib_notifier;
6827
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006828 return 0;
6829
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006830err_register_fib_notifier:
Ido Schimmelaf658b62017-11-02 17:14:09 +01006831err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01006832 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6833err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006834 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006835err_neigh_init:
6836 mlxsw_sp_vrs_fini(mlxsw_sp);
6837err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02006838 mlxsw_sp_mr_fini(mlxsw_sp);
6839err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01006840 mlxsw_sp_lpm_fini(mlxsw_sp);
6841err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006842 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006843err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006844 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006845err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02006846 mlxsw_sp_ipips_fini(mlxsw_sp);
6847err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006848 mlxsw_sp_rifs_fini(mlxsw_sp);
6849err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006850 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006851err_router_init:
6852 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006853 return err;
6854}
6855
6856void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6857{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006858 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01006859 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006860 mlxsw_sp_neigh_fini(mlxsw_sp);
6861 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02006862 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006863 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006864 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
6865 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006866 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006867 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006868 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006869 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006870}