/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

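/* Per-ASIC router state: RIFs, virtual routers, neighbour and nexthop
 * tracking, LPM trees, the IPIP tunnel list and the notifier blocks through
 * which kernel FIB and neighbour updates are mirrored to the device.
 */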
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

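/* Bind or unbind a flow counter to a RIF in the given direction. The RITR
 * register is queried first so that the existing RIF configuration is
 * preserved and only the counter fields are updated.
 */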
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

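/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the requested direction.
 */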
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

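/* Program the tree layout via the RALST register: the longest used prefix
 * length becomes the root bin, and every other used prefix length (except 0)
 * is chained to the next shorter used one, so the used prefix lengths form a
 * single branch.
 */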
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

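/* Return an existing LPM tree that already matches the requested protocol and
 * prefix usage, or create a new one if no such tree is in use yet.
 */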
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

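/* Rebind every virtual router whose FIB currently uses the old LPM tree to
 * the new one; on failure, switch the already rebound FIBs back to the old
 * tree. If the given FIB has no tree yet, only that FIB is bound.
 */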
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

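/* Return the kernel FIB table used for the tunnel's underlay: the table of
 * the bound underlay device if one exists, otherwise the table of the tunnel
 * device itself, defaulting to the main table.
 */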
static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

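/* Allocate a KVD linear entry for the decap tunnel and cross-link the FIB
 * entry with its IPIP entry, turning the FIB entry into a decap entry.
 */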
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* The configuration where several tunnels have the same local address
	 * in the same underlay table needs special treatment in the HW. That is
	 * currently not implemented in the driver.
	 */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node) {
		ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry))
			return ERR_PTR(-EEXIST);
	}

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_ipip_type ipipt;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
							ol_dev);
		if (IS_ERR(ipip_entry))
			return PTR_ERR(ipip_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		return 0;

	/* When a tunneling device is moved to a different VRF, we need to
	 * update the backing loopback. Since RIFs can't be edited, we need to
	 * destroy and recreate it. That might create a window of opportunity
	 * where RALUE and RATR registers end up referencing a RIF that's
	 * already gone. RATRs are handled by the RIF destroy, and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt,
						 ol_dev);
	if (IS_ERR(lb_rif))
		return PTR_ERR(lb_rif);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	ipip_entry->ol_lb = lb_rif;

	if (ol_dev->flags & IFF_UP) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

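/* Entry point for netdevice events on IPIP overlay (tunnel) devices:
 * create or destroy the backing IPIP entry on register/unregister, promote or
 * demote the decap route on up/down, and rebuild the loopback RIF when the
 * tunnel is moved to a different VRF.
 */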
Petr Machata796ec772017-11-03 10:03:29 +01001391int
1392mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1393 struct net_device *ol_dev,
1394 unsigned long event,
1395 struct netdev_notifier_changeupper_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001396{
1397 switch (event) {
1398 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001399 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001400 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001401 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001402 return 0;
1403 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001404 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1405 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001406 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001407 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001408 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001409 case NETDEV_CHANGEUPPER:
1410 if (netif_is_l3_master(info->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001411 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1412 ol_dev);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001413 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001414 }
1415 return 0;
1416}
1417
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001418struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001419 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001420};
1421
1422struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001423 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001424 struct rhash_head ht_node;
1425 struct mlxsw_sp_neigh_key key;
1426 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001427 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001428 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001429 struct list_head nexthop_list; /* list of nexthops using
1430 * this neigh entry
1431 */
Yotam Gigib2157142016-07-05 11:27:51 +02001432 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001433 unsigned int counter_index;
1434 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001435};
1436
1437static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1438 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1439 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1440 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1441};
1442
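/* Walk the list of neighbour entries hanging off a RIF. Passing a NULL
 * entry starts the walk; NULL is returned after the last entry.
 */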
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001443struct mlxsw_sp_neigh_entry *
1444mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1445 struct mlxsw_sp_neigh_entry *neigh_entry)
1446{
1447 if (!neigh_entry) {
1448 if (list_empty(&rif->neigh_list))
1449 return NULL;
1450 else
1451 return list_first_entry(&rif->neigh_list,
1452 typeof(*neigh_entry),
1453 rif_list_node);
1454 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001455 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001456 return NULL;
1457 return list_next_entry(neigh_entry, rif_list_node);
1458}
1459
1460int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1461{
1462 return neigh_entry->key.n->tbl->family;
1463}
1464
1465unsigned char *
1466mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1467{
1468 return neigh_entry->ha;
1469}
1470
1471u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1472{
1473 struct neighbour *n;
1474
1475 n = neigh_entry->key.n;
1476 return ntohl(*((__be32 *) n->primary_key));
1477}
1478
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001479struct in6_addr *
1480mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1481{
1482 struct neighbour *n;
1483
1484 n = neigh_entry->key.n;
1485 return (struct in6_addr *) &n->primary_key;
1486}
1487
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001488int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1489 struct mlxsw_sp_neigh_entry *neigh_entry,
1490 u64 *p_counter)
1491{
1492 if (!neigh_entry->counter_valid)
1493 return -EINVAL;
1494
1495 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1496 p_counter, NULL);
1497}
1498
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001499static struct mlxsw_sp_neigh_entry *
1500mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1501 u16 rif)
1502{
1503 struct mlxsw_sp_neigh_entry *neigh_entry;
1504
1505 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1506 if (!neigh_entry)
1507 return NULL;
1508
1509 neigh_entry->key.n = n;
1510 neigh_entry->rif = rif;
1511 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1512
1513 return neigh_entry;
1514}
1515
1516static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1517{
1518 kfree(neigh_entry);
1519}
1520
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001521static int
1522mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1523 struct mlxsw_sp_neigh_entry *neigh_entry)
1524{
Ido Schimmel9011b672017-05-16 19:38:25 +02001525 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001526 &neigh_entry->ht_node,
1527 mlxsw_sp_neigh_ht_params);
1528}
1529
1530static void
1531mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1532 struct mlxsw_sp_neigh_entry *neigh_entry)
1533{
Ido Schimmel9011b672017-05-16 19:38:25 +02001534 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001535 &neigh_entry->ht_node,
1536 mlxsw_sp_neigh_ht_params);
1537}
1538
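/* A neighbour entry is only given an activity counter when the matching
 * dpipe host table (host4 or host6) has counters enabled via devlink.
 */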
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001539static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001540mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1541 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001542{
1543 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001544 const char *table_name;
1545
1546 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1547 case AF_INET:
1548 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1549 break;
1550 case AF_INET6:
1551 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1552 break;
1553 default:
1554 WARN_ON(1);
1555 return false;
1556 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001557
1558 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001559 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001560}
1561
1562static void
1563mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1564 struct mlxsw_sp_neigh_entry *neigh_entry)
1565{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001566 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001567 return;
1568
1569 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1570 return;
1571
1572 neigh_entry->counter_valid = true;
1573}
1574
1575static void
1576mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1577 struct mlxsw_sp_neigh_entry *neigh_entry)
1578{
1579 if (!neigh_entry->counter_valid)
1580 return;
1581 mlxsw_sp_flow_counter_free(mlxsw_sp,
1582 neigh_entry->counter_index);
1583 neigh_entry->counter_valid = false;
1584}
1585
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001586static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001587mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001588{
1589 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001590 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001591 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001592
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001593 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1594 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001595 return ERR_PTR(-EINVAL);
1596
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001597 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001598 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001599 return ERR_PTR(-ENOMEM);
1600
1601 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1602 if (err)
1603 goto err_neigh_entry_insert;
1604
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001605 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001606 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001607
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001608 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001609
1610err_neigh_entry_insert:
1611 mlxsw_sp_neigh_entry_free(neigh_entry);
1612 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001613}
1614
1615static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001616mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1617 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001618{
Ido Schimmel9665b742017-02-08 11:16:42 +01001619 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001620 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001621 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1622 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001623}
1624
1625static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001626mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001627{
Jiri Pirko33b13412016-11-10 12:31:04 +01001628 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001629
Jiri Pirko33b13412016-11-10 12:31:04 +01001630 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001631 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001632 &key, mlxsw_sp_neigh_ht_params);
1633}
1634
Yotam Gigic723c7352016-07-05 11:27:43 +02001635static void
1636mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1637{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001638 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001639
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001640#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001641 interval = min_t(unsigned long,
1642 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1643 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001644#else
1645 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1646#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001647 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001648}
1649
1650static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1651 char *rauhtd_pl,
1652 int ent_index)
1653{
1654 struct net_device *dev;
1655 struct neighbour *n;
1656 __be32 dipn;
1657 u32 dip;
1658 u16 rif;
1659
1660 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1661
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001662 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001663 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1664 return;
1665 }
1666
1667 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001668 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001669 n = neigh_lookup(&arp_tbl, &dipn, dev);
1670 if (!n) {
1671 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1672 &dip);
1673 return;
1674 }
1675
1676 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1677 neigh_event_send(n, NULL);
1678 neigh_release(n);
1679}
1680
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001681#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001682static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1683 char *rauhtd_pl,
1684 int rec_index)
1685{
1686 struct net_device *dev;
1687 struct neighbour *n;
1688 struct in6_addr dip;
1689 u16 rif;
1690
1691 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1692 (char *) &dip);
1693
1694 if (!mlxsw_sp->router->rifs[rif]) {
1695 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1696 return;
1697 }
1698
1699 dev = mlxsw_sp->router->rifs[rif]->dev;
1700 n = neigh_lookup(&nd_tbl, &dip, dev);
1701 if (!n) {
1702 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1703 &dip);
1704 return;
1705 }
1706
1707 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1708 neigh_event_send(n, NULL);
1709 neigh_release(n);
1710}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001711#else
1712static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1713 char *rauhtd_pl,
1714 int rec_index)
1715{
1716}
1717#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001718
Yotam Gigic723c7352016-07-05 11:27:43 +02001719static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1720 char *rauhtd_pl,
1721 int rec_index)
1722{
1723 u8 num_entries;
1724 int i;
1725
1726 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1727 rec_index);
1728 /* Hardware starts counting at 0, so add 1. */
1729 num_entries++;
1730
1731 /* Each record consists of several neighbour entries. */
1732 for (i = 0; i < num_entries; i++) {
1733 int ent_index;
1734
1735 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1736 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1737 ent_index);
1738 }
1739
1740}
1741
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001742static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1743 char *rauhtd_pl,
1744 int rec_index)
1745{
1746 /* One record contains one entry. */
1747 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1748 rec_index);
1749}
1750
Yotam Gigic723c7352016-07-05 11:27:43 +02001751static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1752 char *rauhtd_pl, int rec_index)
1753{
1754 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1755 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1756 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1757 rec_index);
1758 break;
1759 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001760 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1761 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001762 break;
1763 }
1764}
1765
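/* The RAUHTD response is considered full (so another dump iteration is
 * needed) when the maximum number of records was returned and the last
 * record cannot hold more entries: any IPv6 record, or an IPv4 record
 * already carrying the maximum number of entries.
 */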
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001766static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1767{
1768 u8 num_rec, last_rec_index, num_entries;
1769
1770 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1771 last_rec_index = num_rec - 1;
1772
1773 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1774 return false;
1775 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1776 MLXSW_REG_RAUHTD_TYPE_IPV6)
1777 return true;
1778
1779 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1780 last_rec_index);
1781 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1782 return true;
1783 return false;
1784}
1785
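/* Repeatedly dump activity records of the given type from the RAUHTD
 * register and process each returned record, until a response that is
 * not full indicates the dump is complete.
 */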
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001786static int
1787__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1788 char *rauhtd_pl,
1789 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001790{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001791 int i, num_rec;
1792 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001793
1794 /* Make sure the neighbour's netdev isn't removed in the
1795 * process.
1796 */
1797 rtnl_lock();
1798 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001799 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001800 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1801 rauhtd_pl);
1802 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001803 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001804 break;
1805 }
1806 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1807 for (i = 0; i < num_rec; i++)
1808 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1809 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001810 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001811 rtnl_unlock();
1812
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001813 return err;
1814}
1815
1816static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1817{
1818 enum mlxsw_reg_rauhtd_type type;
1819 char *rauhtd_pl;
1820 int err;
1821
1822 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1823 if (!rauhtd_pl)
1824 return -ENOMEM;
1825
1826 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1827 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1828 if (err)
1829 goto out;
1830
1831 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1832 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1833out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001834 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001835 return err;
1836}
1837
1838static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1839{
1840 struct mlxsw_sp_neigh_entry *neigh_entry;
1841
1842	/* Take the RTNL mutex here to prevent the lists from changing. */
1843 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001844 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001845 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001846		/* If this neigh has nexthops, make the kernel think this neigh
1847 * is active regardless of the traffic.
1848 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001849 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001850 rtnl_unlock();
1851}
1852
1853static void
1854mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1855{
Ido Schimmel9011b672017-05-16 19:38:25 +02001856 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001857
Ido Schimmel9011b672017-05-16 19:38:25 +02001858 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001859 msecs_to_jiffies(interval));
1860}
1861
1862static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1863{
Ido Schimmel9011b672017-05-16 19:38:25 +02001864 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001865 int err;
1866
Ido Schimmel9011b672017-05-16 19:38:25 +02001867 router = container_of(work, struct mlxsw_sp_router,
1868 neighs_update.dw.work);
1869 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001870 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001871		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
Yotam Gigib2157142016-07-05 11:27:51 +02001872
Ido Schimmel9011b672017-05-16 19:38:25 +02001873 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001874
Ido Schimmel9011b672017-05-16 19:38:25 +02001875 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001876}
1877
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001878static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1879{
1880 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001881 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001882
Ido Schimmel9011b672017-05-16 19:38:25 +02001883 router = container_of(work, struct mlxsw_sp_router,
1884 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001885	/* Iterate over the nexthop neighbours, find those that are unresolved
1886	 * and send ARP for them. This solves the chicken-and-egg problem where
1887	 * a nexthop would not be offloaded until its neighbour is resolved, but
1888	 * the neighbour would never be resolved as long as traffic is flowing
1889	 * in HW using a different nexthop.
1890	 *
1891	 * Take the RTNL mutex here to prevent the lists from changing.
1892	 */
1893 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001894 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001895 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001896 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001897 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001898 rtnl_unlock();
1899
Ido Schimmel9011b672017-05-16 19:38:25 +02001900 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001901 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1902}
1903
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001904static void
1905mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1906 struct mlxsw_sp_neigh_entry *neigh_entry,
1907 bool removing);
1908
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001909static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001910{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001911 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1912 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1913}
1914
1915static void
1916mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1917 struct mlxsw_sp_neigh_entry *neigh_entry,
1918 enum mlxsw_reg_rauht_op op)
1919{
Jiri Pirko33b13412016-11-10 12:31:04 +01001920 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001921 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001922 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001923
1924 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1925 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001926 if (neigh_entry->counter_valid)
1927 mlxsw_reg_rauht_pack_counter(rauht_pl,
1928 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001929 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1930}
1931
1932static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001933mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1934 struct mlxsw_sp_neigh_entry *neigh_entry,
1935 enum mlxsw_reg_rauht_op op)
1936{
1937 struct neighbour *n = neigh_entry->key.n;
1938 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1939 const char *dip = n->primary_key;
1940
1941 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1942 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001943 if (neigh_entry->counter_valid)
1944 mlxsw_reg_rauht_pack_counter(rauht_pl,
1945 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001946 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1947}
1948
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001949bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001950{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001951 struct neighbour *n = neigh_entry->key.n;
1952
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001953 /* Packets with a link-local destination address are trapped
1954 * after LPM lookup and never reach the neighbour table, so
1955 * there is no need to program such neighbours to the device.
1956 */
1957 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1958 IPV6_ADDR_LINKLOCAL)
1959 return true;
1960 return false;
1961}
1962
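/* Reflect the neighbour's state in the device: program it to the RAUHT
 * table when it becomes connected and remove it otherwise. IPv6
 * link-local neighbours are never programmed.
 */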
1963static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001964mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1965 struct mlxsw_sp_neigh_entry *neigh_entry,
1966 bool adding)
1967{
1968 if (!adding && !neigh_entry->connected)
1969 return;
1970 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001971 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001972 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1973 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001974 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001975 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001976 return;
1977 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
1978 mlxsw_sp_rauht_op(adding));
1979 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001980 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001981 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001982}
1983
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02001984void
1985mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
1986 struct mlxsw_sp_neigh_entry *neigh_entry,
1987 bool adding)
1988{
1989 if (adding)
1990 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
1991 else
1992 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
1993 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
1994}
1995
Ido Schimmelceb88812017-11-02 17:14:07 +01001996struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001997 struct work_struct work;
1998 struct mlxsw_sp *mlxsw_sp;
1999 struct neighbour *n;
2000};
2001
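/* Deferred handler for NETEVENT_NEIGH_UPDATE. Runs under RTNL: it looks
 * up or creates the neighbour entry, updates it and the nexthops using
 * it, and destroys it once it is disconnected and no longer referenced.
 */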
2002static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2003{
Ido Schimmelceb88812017-11-02 17:14:07 +01002004 struct mlxsw_sp_netevent_work *net_work =
2005 container_of(work, struct mlxsw_sp_netevent_work, work);
2006 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002007 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002008 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002009 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002010 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002011 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002012
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002013 /* If these parameters are changed after we release the lock,
2014 * then we are guaranteed to receive another event letting us
2015 * know about it.
2016 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002017 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002018 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002019 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002020 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002021 read_unlock_bh(&n->lock);
2022
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002023 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002024 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002025 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2026 if (!entry_connected && !neigh_entry)
2027 goto out;
2028 if (!neigh_entry) {
2029 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2030 if (IS_ERR(neigh_entry))
2031 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002032 }
2033
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002034 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2035 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2036 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2037
2038 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2039 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2040
2041out:
2042 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002043 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002044 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002045}
2046
Ido Schimmel28678f02017-11-02 17:14:10 +01002047static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2048
2049static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2050{
2051 struct mlxsw_sp_netevent_work *net_work =
2052 container_of(work, struct mlxsw_sp_netevent_work, work);
2053 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2054
2055 mlxsw_sp_mp_hash_init(mlxsw_sp);
2056 kfree(net_work);
2057}
2058
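/* Netevent notifier called in atomic context. DELAY_PROBE_TIME updates
 * are handled in place, while neighbour updates and multipath hash
 * changes are deferred to process context via work items.
 */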
2059static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002060 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002061{
Ido Schimmelceb88812017-11-02 17:14:07 +01002062 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002063 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002064 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002065 struct mlxsw_sp *mlxsw_sp;
2066 unsigned long interval;
2067 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002068 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002069 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002070
2071 switch (event) {
2072 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2073 p = ptr;
2074
2075 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002076 if (!p->dev || (p->tbl->family != AF_INET &&
2077 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002078 return NOTIFY_DONE;
2079
2080 /* We are in atomic context and can't take RTNL mutex,
2081 * so use RCU variant to walk the device chain.
2082 */
2083 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2084 if (!mlxsw_sp_port)
2085 return NOTIFY_DONE;
2086
2087 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2088 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002089 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002090
2091 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2092 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002093 case NETEVENT_NEIGH_UPDATE:
2094 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002095
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002096 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002097 return NOTIFY_DONE;
2098
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002099 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002100 if (!mlxsw_sp_port)
2101 return NOTIFY_DONE;
2102
Ido Schimmelceb88812017-11-02 17:14:07 +01002103 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2104 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002105 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002106 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002107 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002108
Ido Schimmelceb88812017-11-02 17:14:07 +01002109 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2110 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2111 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002112
2113		/* Take a reference to ensure the neighbour won't be
2114		 * destroyed until we drop the reference in the
2115		 * work item.
2116		 */
2117 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002118 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002119 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002120 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002121 case NETEVENT_MULTIPATH_HASH_UPDATE:
2122 net = ptr;
2123
2124 if (!net_eq(net, &init_net))
2125 return NOTIFY_DONE;
2126
2127 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2128 if (!net_work)
2129 return NOTIFY_BAD;
2130
2131 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2132 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2133 net_work->mlxsw_sp = router->mlxsw_sp;
2134 mlxsw_core_schedule_work(&net_work->work);
2135 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002136 }
2137
2138 return NOTIFY_DONE;
2139}
2140
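/* Set up neighbour tracking: the neighbour hash table and the delayed
 * works that dump hardware activity and probe unresolved nexthops.
 */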
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002141static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2142{
Yotam Gigic723c7352016-07-05 11:27:43 +02002143 int err;
2144
Ido Schimmel9011b672017-05-16 19:38:25 +02002145 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002146 &mlxsw_sp_neigh_ht_params);
2147 if (err)
2148 return err;
2149
2150 /* Initialize the polling interval according to the default
2151 * table.
2152 */
2153 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2154
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002155	/* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002156 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002157 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002158 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002159 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002160 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2161 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002162 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002163}
2164
2165static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2166{
Ido Schimmel9011b672017-05-16 19:38:25 +02002167 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2168 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2169 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002170}
2171
Ido Schimmel9665b742017-02-08 11:16:42 +01002172static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002173 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002174{
2175 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2176
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002177 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002178 rif_list_node) {
2179 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002180 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002181 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002182}
2183
Petr Machata35225e42017-09-02 23:49:22 +02002184enum mlxsw_sp_nexthop_type {
2185 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002186 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002187};
2188
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002189struct mlxsw_sp_nexthop_key {
2190 struct fib_nh *fib_nh;
2191};
2192
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002193struct mlxsw_sp_nexthop {
2194 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002195 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002196 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002197 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2198 * this belongs to
2199 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002200 struct rhash_head ht_node;
2201 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002202 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002203 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002204 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002205 int norm_nh_weight;
2206 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002207 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002208 u8 should_offload:1, /* set indicates this neigh is connected and
2209 * should be put to KVD linear area of this group.
2210 */
2211 offloaded:1, /* set in case the neigh is actually put into
2212 * KVD linear area of this group.
2213 */
2214 update:1; /* set indicates that MAC of this neigh should be
2215 * updated in HW
2216 */
Petr Machata35225e42017-09-02 23:49:22 +02002217 enum mlxsw_sp_nexthop_type type;
2218 union {
2219 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002220 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002221 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002222 unsigned int counter_index;
2223 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002224};
2225
2226struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002227 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002228 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002229 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002230 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002231 u8 adj_index_valid:1,
2232 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002233 u32 adj_index;
2234 u16 ecmp_size;
2235 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002236 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002237 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002238#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002239};
2240
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002241void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2242 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002243{
2244 struct devlink *devlink;
2245
2246 devlink = priv_to_devlink(mlxsw_sp->core);
2247 if (!devlink_dpipe_table_counter_enabled(devlink,
2248 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2249 return;
2250
2251 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2252 return;
2253
2254 nh->counter_valid = true;
2255}
2256
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002257void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2258 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002259{
2260 if (!nh->counter_valid)
2261 return;
2262 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2263 nh->counter_valid = false;
2264}
2265
2266int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2267 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2268{
2269 if (!nh->counter_valid)
2270 return -EINVAL;
2271
2272 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2273 p_counter, NULL);
2274}
2275
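/* Walk the router's global nexthop list. Passing a NULL nexthop starts
 * the walk; NULL is returned after the last nexthop.
 */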
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002276struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2277 struct mlxsw_sp_nexthop *nh)
2278{
2279 if (!nh) {
2280 if (list_empty(&router->nexthop_list))
2281 return NULL;
2282 else
2283 return list_first_entry(&router->nexthop_list,
2284 typeof(*nh), router_list_node);
2285 }
2286 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2287 return NULL;
2288 return list_next_entry(nh, router_list_node);
2289}
2290
2291bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2292{
2293 return nh->offloaded;
2294}
2295
2296unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2297{
2298 if (!nh->offloaded)
2299 return NULL;
2300 return nh->neigh_entry->ha;
2301}
2302
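/* Return the group's base adjacency index and size, along with this
 * nexthop's offset within the group, computed by summing the adjacency
 * entries of the offloaded nexthops preceding it.
 */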
2303int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002304 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002305{
2306 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2307 u32 adj_hash_index = 0;
2308 int i;
2309
2310 if (!nh->offloaded || !nh_grp->adj_index_valid)
2311 return -EINVAL;
2312
2313 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002314 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002315
2316 for (i = 0; i < nh_grp->count; i++) {
2317 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2318
2319 if (nh_iter == nh)
2320 break;
2321 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002322 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002323 }
2324
2325 *p_adj_hash_index = adj_hash_index;
2326 return 0;
2327}
2328
2329struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2330{
2331 return nh->rif;
2332}
2333
2334bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2335{
2336 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2337 int i;
2338
2339 for (i = 0; i < nh_grp->count; i++) {
2340 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2341
2342 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2343 return true;
2344 }
2345 return false;
2346}
2347
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002348static struct fib_info *
2349mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2350{
2351 return nh_grp->priv;
2352}
2353
2354struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002355 enum mlxsw_sp_l3proto proto;
2356 union {
2357 struct fib_info *fi;
2358 struct mlxsw_sp_fib6_entry *fib6_entry;
2359 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002360};
2361
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002362static bool
2363mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2364 const struct in6_addr *gw, int ifindex)
2365{
2366 int i;
2367
2368 for (i = 0; i < nh_grp->count; i++) {
2369 const struct mlxsw_sp_nexthop *nh;
2370
2371 nh = &nh_grp->nexthops[i];
2372 if (nh->ifindex == ifindex &&
2373 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2374 return true;
2375 }
2376
2377 return false;
2378}
2379
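/* An IPv6 nexthop group matches a FIB6 entry if they have the same
 * number of nexthops and every route in the entry has a corresponding
 * nexthop with the same gateway and interface.
 */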
2380static bool
2381mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2382 const struct mlxsw_sp_fib6_entry *fib6_entry)
2383{
2384 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2385
2386 if (nh_grp->count != fib6_entry->nrt6)
2387 return false;
2388
2389 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2390 struct in6_addr *gw;
2391 int ifindex;
2392
2393 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2394 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2395 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2396 return false;
2397 }
2398
2399 return true;
2400}
2401
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002402static int
2403mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2404{
2405 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2406 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2407
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002408 switch (cmp_arg->proto) {
2409 case MLXSW_SP_L3_PROTO_IPV4:
2410 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2411 case MLXSW_SP_L3_PROTO_IPV6:
2412 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2413 cmp_arg->fib6_entry);
2414 default:
2415 WARN_ON(1);
2416 return 1;
2417 }
2418}
2419
2420static int
2421mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2422{
2423 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002424}
2425
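/* IPv4 groups are keyed by their fib_info pointer. IPv6 groups are keyed
 * by the nexthop count XORed with each nexthop's ifindex, which must
 * match the value computed by mlxsw_sp_nexthop6_group_hash() below for
 * the corresponding FIB6 entry.
 */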
2426static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2427{
2428 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002429 const struct mlxsw_sp_nexthop *nh;
2430 struct fib_info *fi;
2431 unsigned int val;
2432 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002433
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002434 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2435 case AF_INET:
2436 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2437 return jhash(&fi, sizeof(fi), seed);
2438 case AF_INET6:
2439 val = nh_grp->count;
2440 for (i = 0; i < nh_grp->count; i++) {
2441 nh = &nh_grp->nexthops[i];
2442 val ^= nh->ifindex;
2443 }
2444 return jhash(&val, sizeof(val), seed);
2445 default:
2446 WARN_ON(1);
2447 return 0;
2448 }
2449}
2450
2451static u32
2452mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2453{
2454 unsigned int val = fib6_entry->nrt6;
2455 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2456 struct net_device *dev;
2457
2458 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2459 dev = mlxsw_sp_rt6->rt->dst.dev;
2460 val ^= dev->ifindex;
2461 }
2462
2463 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002464}
2465
2466static u32
2467mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2468{
2469 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2470
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002471 switch (cmp_arg->proto) {
2472 case MLXSW_SP_L3_PROTO_IPV4:
2473 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2474 case MLXSW_SP_L3_PROTO_IPV6:
2475 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2476 default:
2477 WARN_ON(1);
2478 return 0;
2479 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002480}
2481
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002482static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002483 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002484 .hashfn = mlxsw_sp_nexthop_group_hash,
2485 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2486 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002487};
2488
2489static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2490 struct mlxsw_sp_nexthop_group *nh_grp)
2491{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002492 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2493 !nh_grp->gateway)
2494 return 0;
2495
Ido Schimmel9011b672017-05-16 19:38:25 +02002496 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002497 &nh_grp->ht_node,
2498 mlxsw_sp_nexthop_group_ht_params);
2499}
2500
2501static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2502 struct mlxsw_sp_nexthop_group *nh_grp)
2503{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002504 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2505 !nh_grp->gateway)
2506 return;
2507
Ido Schimmel9011b672017-05-16 19:38:25 +02002508 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002509 &nh_grp->ht_node,
2510 mlxsw_sp_nexthop_group_ht_params);
2511}
2512
2513static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002514mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2515 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002516{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002517 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2518
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002519 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002520 cmp_arg.fi = fi;
2521 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2522 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002523 mlxsw_sp_nexthop_group_ht_params);
2524}
2525
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002526static struct mlxsw_sp_nexthop_group *
2527mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2528 struct mlxsw_sp_fib6_entry *fib6_entry)
2529{
2530 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2531
2532 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2533 cmp_arg.fib6_entry = fib6_entry;
2534 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2535 &cmp_arg,
2536 mlxsw_sp_nexthop_group_ht_params);
2537}
2538
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002539static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2540 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2541 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2542 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2543};
2544
2545static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2546 struct mlxsw_sp_nexthop *nh)
2547{
Ido Schimmel9011b672017-05-16 19:38:25 +02002548 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002549 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2550}
2551
2552static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2553 struct mlxsw_sp_nexthop *nh)
2554{
Ido Schimmel9011b672017-05-16 19:38:25 +02002555 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002556 mlxsw_sp_nexthop_ht_params);
2557}
2558
Ido Schimmelad178c82017-02-08 11:16:40 +01002559static struct mlxsw_sp_nexthop *
2560mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2561 struct mlxsw_sp_nexthop_key key)
2562{
Ido Schimmel9011b672017-05-16 19:38:25 +02002563 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002564 mlxsw_sp_nexthop_ht_params);
2565}
2566
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002567static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002568 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002569 u32 adj_index, u16 ecmp_size,
2570 u32 new_adj_index,
2571 u16 new_ecmp_size)
2572{
2573 char raleu_pl[MLXSW_REG_RALEU_LEN];
2574
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002575 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002576 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2577 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002578 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002579 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2580}
2581
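/* An adjacency group was moved to a new location in the KVD linear area.
 * Issue a RALEU update for every FIB (virtual router) that has entries
 * using this group, so they point at the new adjacency index.
 */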
2582static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2583 struct mlxsw_sp_nexthop_group *nh_grp,
2584 u32 old_adj_index, u16 old_ecmp_size)
2585{
2586 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002587 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002588 int err;
2589
2590 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002591 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002592 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002593 fib = fib_entry->fib_node->fib;
2594 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002595 old_adj_index,
2596 old_ecmp_size,
2597 nh_grp->adj_index,
2598 nh_grp->ecmp_size);
2599 if (err)
2600 return err;
2601 }
2602 return 0;
2603}
2604
Ido Schimmeleb789982017-10-22 23:11:48 +02002605static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2606 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002607{
2608 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2609 char ratr_pl[MLXSW_REG_RATR_LEN];
2610
2611 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002612 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2613 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002614 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002615 if (nh->counter_valid)
2616 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2617 else
2618 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2619
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002620 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2621}
2622
Ido Schimmeleb789982017-10-22 23:11:48 +02002623int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2624 struct mlxsw_sp_nexthop *nh)
2625{
2626 int i;
2627
2628 for (i = 0; i < nh->num_adj_entries; i++) {
2629 int err;
2630
2631 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2632 if (err)
2633 return err;
2634 }
2635
2636 return 0;
2637}
2638
2639static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2640 u32 adj_index,
2641 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002642{
2643 const struct mlxsw_sp_ipip_ops *ipip_ops;
2644
2645 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2646 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2647}
2648
Ido Schimmeleb789982017-10-22 23:11:48 +02002649static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2650 u32 adj_index,
2651 struct mlxsw_sp_nexthop *nh)
2652{
2653 int i;
2654
2655 for (i = 0; i < nh->num_adj_entries; i++) {
2656 int err;
2657
2658 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2659 nh);
2660 if (err)
2661 return err;
2662 }
2663
2664 return 0;
2665}
2666
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002667static int
Petr Machata35225e42017-09-02 23:49:22 +02002668mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2669 struct mlxsw_sp_nexthop_group *nh_grp,
2670 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002671{
2672 u32 adj_index = nh_grp->adj_index; /* base */
2673 struct mlxsw_sp_nexthop *nh;
2674 int i;
2675 int err;
2676
2677 for (i = 0; i < nh_grp->count; i++) {
2678 nh = &nh_grp->nexthops[i];
2679
2680 if (!nh->should_offload) {
2681 nh->offloaded = 0;
2682 continue;
2683 }
2684
Ido Schimmela59b7e02017-01-23 11:11:42 +01002685 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002686 switch (nh->type) {
2687 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002688 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002689 (mlxsw_sp, adj_index, nh);
2690 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002691 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2692 err = mlxsw_sp_nexthop_ipip_update
2693 (mlxsw_sp, adj_index, nh);
2694 break;
Petr Machata35225e42017-09-02 23:49:22 +02002695 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002696 if (err)
2697 return err;
2698 nh->update = 0;
2699 nh->offloaded = 1;
2700 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002701 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002702 }
2703 return 0;
2704}
2705
Ido Schimmel1819ae32017-07-21 18:04:28 +02002706static bool
2707mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2708 const struct mlxsw_sp_fib_entry *fib_entry);
2709
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002710static int
2711mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2712 struct mlxsw_sp_nexthop_group *nh_grp)
2713{
2714 struct mlxsw_sp_fib_entry *fib_entry;
2715 int err;
2716
2717 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002718 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2719 fib_entry))
2720 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002721 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2722 if (err)
2723 return err;
2724 }
2725 return 0;
2726}
2727
2728static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002729mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2730 enum mlxsw_reg_ralue_op op, int err);
2731
2732static void
2733mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2734{
2735 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2736 struct mlxsw_sp_fib_entry *fib_entry;
2737
2738 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2739 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2740 fib_entry))
2741 continue;
2742 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2743 }
2744}
2745
Ido Schimmel425a08c2017-10-22 23:11:47 +02002746static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2747{
2748 /* Valid sizes for an adjacency group are:
2749 * 1-64, 512, 1024, 2048 and 4096.
2750 */
2751 if (*p_adj_grp_size <= 64)
2752 return;
2753 else if (*p_adj_grp_size <= 512)
2754 *p_adj_grp_size = 512;
2755 else if (*p_adj_grp_size <= 1024)
2756 *p_adj_grp_size = 1024;
2757 else if (*p_adj_grp_size <= 2048)
2758 *p_adj_grp_size = 2048;
2759 else
2760 *p_adj_grp_size = 4096;
2761}
2762
2763static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2764 unsigned int alloc_size)
2765{
2766 if (alloc_size >= 4096)
2767 *p_adj_grp_size = 4096;
2768 else if (alloc_size >= 2048)
2769 *p_adj_grp_size = 2048;
2770 else if (alloc_size >= 1024)
2771 *p_adj_grp_size = 1024;
2772 else if (alloc_size >= 512)
2773 *p_adj_grp_size = 512;
2774}
2775
2776static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2777 u16 *p_adj_grp_size)
2778{
2779 unsigned int alloc_size;
2780 int err;
2781
2782 /* Round up the requested group size to the next size supported
2783 * by the device and make sure the request can be satisfied.
2784 */
2785 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2786 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2787 &alloc_size);
2788 if (err)
2789 return err;
2790 /* It is possible the allocation results in more allocated
2791	 * entries than requested. Try to use as many of them as
2792 * possible.
2793 */
2794 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2795
2796 return 0;
2797}
2798
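/* Normalize the nexthop weights: divide each offloadable nexthop's
 * weight by the GCD of all of them and record the resulting sum. The sum
 * is the smallest number of adjacency entries that preserves the
 * configured weight ratios.
 */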
Ido Schimmel77d964e2017-08-02 09:56:05 +02002799static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002800mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2801{
2802 int i, g = 0, sum_norm_weight = 0;
2803 struct mlxsw_sp_nexthop *nh;
2804
2805 for (i = 0; i < nh_grp->count; i++) {
2806 nh = &nh_grp->nexthops[i];
2807
2808 if (!nh->should_offload)
2809 continue;
2810 if (g > 0)
2811 g = gcd(nh->nh_weight, g);
2812 else
2813 g = nh->nh_weight;
2814 }
2815
2816 for (i = 0; i < nh_grp->count; i++) {
2817 nh = &nh_grp->nexthops[i];
2818
2819 if (!nh->should_offload)
2820 continue;
2821 nh->norm_nh_weight = nh->nh_weight / g;
2822 sum_norm_weight += nh->norm_nh_weight;
2823 }
2824
2825 nh_grp->sum_norm_weight = sum_norm_weight;
2826}
2827
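/* Distribute the group's adjacency entries among the offloaded nexthops
 * in proportion to their normalized weights, rounding so that exactly
 * ecmp_size entries are handed out in total.
 */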
2828static void
2829mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2830{
2831 int total = nh_grp->sum_norm_weight;
2832 u16 ecmp_size = nh_grp->ecmp_size;
2833 int i, weight = 0, lower_bound = 0;
2834
2835 for (i = 0; i < nh_grp->count; i++) {
2836 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2837 int upper_bound;
2838
2839 if (!nh->should_offload)
2840 continue;
2841 weight += nh->norm_nh_weight;
2842 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2843 nh->num_adj_entries = upper_bound - lower_bound;
2844 lower_bound = upper_bound;
2845 }
2846}
2847
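/* Re-evaluate the group after a nexthop change: normalize the weights,
 * allocate an adjacency group of a suitable size from the KVD linear
 * area, program the nexthops and repoint the FIB entries at the new
 * adjacency index. On failure, or when no nexthop is resolved, fall back
 * to trapping the traffic to the kernel.
 */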
2848static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002849mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2850 struct mlxsw_sp_nexthop_group *nh_grp)
2851{
Ido Schimmeleb789982017-10-22 23:11:48 +02002852 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002853 struct mlxsw_sp_nexthop *nh;
2854 bool offload_change = false;
2855 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002856 bool old_adj_index_valid;
2857 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002858 int i;
2859 int err;
2860
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002861 if (!nh_grp->gateway) {
2862 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2863 return;
2864 }
2865
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002866 for (i = 0; i < nh_grp->count; i++) {
2867 nh = &nh_grp->nexthops[i];
2868
Petr Machata56b8a9e2017-07-31 09:27:29 +02002869 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002870 offload_change = true;
2871 if (nh->should_offload)
2872 nh->update = 1;
2873 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002874 }
2875 if (!offload_change) {
2876 /* Nothing was added or removed, so no need to reallocate. Just
 2877 * update the MACs of the existing adjacency entries.
2878 */
Petr Machata35225e42017-09-02 23:49:22 +02002879 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002880 if (err) {
2881 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2882 goto set_trap;
2883 }
2884 return;
2885 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002886 mlxsw_sp_nexthop_group_normalize(nh_grp);
2887 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002888	 /* No neigh of this group is connected, so we just set
 2889	 * the trap and let everything flow through the kernel.
2890 */
2891 goto set_trap;
2892
Ido Schimmeleb789982017-10-22 23:11:48 +02002893 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002894 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2895 if (err)
2896 /* No valid allocation size available. */
2897 goto set_trap;
2898
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002899 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2900 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002901 /* We ran out of KVD linear space, just set the
 2902	 * trap and let everything flow through the kernel.
2903 */
2904 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2905 goto set_trap;
2906 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002907 old_adj_index_valid = nh_grp->adj_index_valid;
2908 old_adj_index = nh_grp->adj_index;
2909 old_ecmp_size = nh_grp->ecmp_size;
2910 nh_grp->adj_index_valid = 1;
2911 nh_grp->adj_index = adj_index;
2912 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002913 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002914 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002915 if (err) {
2916 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2917 goto set_trap;
2918 }
2919
2920 if (!old_adj_index_valid) {
2921 /* The trap was set for fib entries, so we have to call
 2922	 * fib entry update to unset it and use the adjacency index.
2923 */
2924 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2925 if (err) {
2926 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2927 goto set_trap;
2928 }
2929 return;
2930 }
2931
2932 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2933 old_adj_index, old_ecmp_size);
2934 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2935 if (err) {
2936 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2937 goto set_trap;
2938 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002939
2940 /* Offload state within the group changed, so update the flags. */
2941 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2942
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002943 return;
2944
2945set_trap:
2946 old_adj_index_valid = nh_grp->adj_index_valid;
2947 nh_grp->adj_index_valid = 0;
2948 for (i = 0; i < nh_grp->count; i++) {
2949 nh = &nh_grp->nexthops[i];
2950 nh->offloaded = 0;
2951 }
2952 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2953 if (err)
2954 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2955 if (old_adj_index_valid)
2956 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2957}
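/* Editorial note: every failure path in mlxsw_sp_nexthop_group_refresh()
 * converges on set_trap above. The group then keeps forwarding by punting
 * to the kernel: the adjacency index is invalidated, every nexthop is
 * marked as not offloaded, the group's FIB entries are re-written with a
 * trap action and any previously held KVD linear allocation is freed.
 */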
2958
2959static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2960 bool removing)
2961{
Petr Machata213666a2017-07-31 09:27:30 +02002962 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002963 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002964 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002965 nh->should_offload = 0;
2966 nh->update = 1;
2967}
2968
2969static void
2970mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2971 struct mlxsw_sp_neigh_entry *neigh_entry,
2972 bool removing)
2973{
2974 struct mlxsw_sp_nexthop *nh;
2975
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002976 list_for_each_entry(nh, &neigh_entry->nexthop_list,
2977 neigh_list_node) {
2978 __mlxsw_sp_nexthop_neigh_update(nh, removing);
2979 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2980 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002981}
2982
Ido Schimmel9665b742017-02-08 11:16:42 +01002983static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002984 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002985{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002986 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002987 return;
2988
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002989 nh->rif = rif;
2990 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01002991}
2992
2993static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
2994{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002995 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002996 return;
2997
2998 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002999 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003000}
3001
Ido Schimmela8c97012017-02-08 11:16:35 +01003002static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3003 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003004{
3005 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003006 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003007 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003008 int err;
3009
Ido Schimmelad178c82017-02-08 11:16:40 +01003010 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003011 return 0;
3012
Jiri Pirko33b13412016-11-10 12:31:04 +01003013	 /* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003014	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003015 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003016 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003017 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003018 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003019 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003020 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3021 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003022 if (IS_ERR(n))
3023 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003024 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003025 }
3026 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3027 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003028 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3029 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003030 err = -EINVAL;
3031 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003032 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003033 }
Yotam Gigib2157142016-07-05 11:27:51 +02003034
3035 /* If that is the first nexthop connected to that neigh, add to
3036 * nexthop_neighs_list
3037 */
3038 if (list_empty(&neigh_entry->nexthop_list))
3039 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003040 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003041
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003042 nh->neigh_entry = neigh_entry;
3043 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3044 read_lock_bh(&n->lock);
3045 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003046 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003047 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003048 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003049
3050 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003051
3052err_neigh_entry_create:
3053 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003054 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003055}
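/* Editorial note: the neighbour reference taken above (in neigh_lookup() or
 * neigh_create()) is owned by the nexthop from here on. It is dropped on
 * the error path just below, or by mlxsw_sp_nexthop_neigh_fini() when the
 * nexthop is torn down, so the struct neighbour cannot be freed while an
 * adjacency entry may still be derived from it.
 */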
3056
Ido Schimmela8c97012017-02-08 11:16:35 +01003057static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3058 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003059{
3060 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003061 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003062
Ido Schimmelb8399a12017-02-08 11:16:33 +01003063 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003064 return;
3065 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003066
Ido Schimmel58312122016-12-23 09:32:50 +01003067 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003068 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003069 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003070
3071 /* If that is the last nexthop connected to that neigh, remove from
3072 * nexthop_neighs_list
3073 */
Ido Schimmele58be792017-02-08 11:16:28 +01003074 if (list_empty(&neigh_entry->nexthop_list))
3075 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003076
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003077 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3078 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3079
3080 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003081}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003082
Petr Machata1012b9a2017-09-02 23:49:23 +02003083static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003084 struct mlxsw_sp_nexthop *nh,
3085 struct net_device *ol_dev)
3086{
3087 if (!nh->nh_grp->gateway || nh->ipip_entry)
3088 return 0;
3089
Petr Machata4cccb732017-10-16 16:26:39 +02003090 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3091 if (!nh->ipip_entry)
3092 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003093
3094 __mlxsw_sp_nexthop_neigh_update(nh, false);
3095 return 0;
3096}
3097
3098static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3099 struct mlxsw_sp_nexthop *nh)
3100{
3101 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3102
3103 if (!ipip_entry)
3104 return;
3105
3106 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003107 nh->ipip_entry = NULL;
3108}
3109
3110static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3111 const struct fib_nh *fib_nh,
3112 enum mlxsw_sp_ipip_type *p_ipipt)
3113{
3114 struct net_device *dev = fib_nh->nh_dev;
3115
3116 return dev &&
3117 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3118 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3119}
3120
Petr Machata35225e42017-09-02 23:49:22 +02003121static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3122 struct mlxsw_sp_nexthop *nh)
3123{
3124 switch (nh->type) {
3125 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3126 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3127 mlxsw_sp_nexthop_rif_fini(nh);
3128 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003129 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003130 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003131 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3132 break;
Petr Machata35225e42017-09-02 23:49:22 +02003133 }
3134}
3135
3136static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3137 struct mlxsw_sp_nexthop *nh,
3138 struct fib_nh *fib_nh)
3139{
Petr Machata1012b9a2017-09-02 23:49:23 +02003140 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003141 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003142 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003143 struct mlxsw_sp_rif *rif;
3144 int err;
3145
Petr Machata1012b9a2017-09-02 23:49:23 +02003146 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3147 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3148 MLXSW_SP_L3_PROTO_IPV4)) {
3149 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003150 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003151 if (err)
3152 return err;
3153 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3154 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003155 }
3156
Petr Machata35225e42017-09-02 23:49:22 +02003157 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3158 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3159 if (!rif)
3160 return 0;
3161
3162 mlxsw_sp_nexthop_rif_init(nh, rif);
3163 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3164 if (err)
3165 goto err_neigh_init;
3166
3167 return 0;
3168
3169err_neigh_init:
3170 mlxsw_sp_nexthop_rif_fini(nh);
3171 return err;
3172}
3173
3174static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3175 struct mlxsw_sp_nexthop *nh)
3176{
3177 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3178}
3179
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003180static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3181 struct mlxsw_sp_nexthop_group *nh_grp,
3182 struct mlxsw_sp_nexthop *nh,
3183 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003184{
3185 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003186 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003187 int err;
3188
3189 nh->nh_grp = nh_grp;
3190 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003191#ifdef CONFIG_IP_ROUTE_MULTIPATH
3192 nh->nh_weight = fib_nh->nh_weight;
3193#else
3194 nh->nh_weight = 1;
3195#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003196 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003197 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3198 if (err)
3199 return err;
3200
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003201 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003202 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3203
Ido Schimmel97989ee2017-03-10 08:53:38 +01003204 if (!dev)
3205 return 0;
3206
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003207 in_dev = __in_dev_get_rtnl(dev);
3208 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3209 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3210 return 0;
3211
Petr Machata35225e42017-09-02 23:49:22 +02003212 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003213 if (err)
3214 goto err_nexthop_neigh_init;
3215
3216 return 0;
3217
3218err_nexthop_neigh_init:
3219 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3220 return err;
3221}
3222
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003223static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3224 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003225{
Petr Machata35225e42017-09-02 23:49:22 +02003226 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003227 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003228 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003229 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003230}
3231
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003232static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3233 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003234{
3235 struct mlxsw_sp_nexthop_key key;
3236 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003237
Ido Schimmel9011b672017-05-16 19:38:25 +02003238 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003239 return;
3240
3241 key.fib_nh = fib_nh;
3242 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3243 if (WARN_ON_ONCE(!nh))
3244 return;
3245
Ido Schimmelad178c82017-02-08 11:16:40 +01003246 switch (event) {
3247 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003248 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003249 break;
3250 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003251 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003252 break;
3253 }
3254
3255 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3256}
3257
Ido Schimmel9665b742017-02-08 11:16:42 +01003258static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003259 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003260{
3261 struct mlxsw_sp_nexthop *nh, *tmp;
3262
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003263 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003264 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003265 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3266 }
3267}
3268
Petr Machata9b014512017-09-02 23:49:20 +02003269static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3270 const struct fib_info *fi)
3271{
Petr Machata1012b9a2017-09-02 23:49:23 +02003272 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3273 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003274}
3275
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003276static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003277mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003278{
3279 struct mlxsw_sp_nexthop_group *nh_grp;
3280 struct mlxsw_sp_nexthop *nh;
3281 struct fib_nh *fib_nh;
3282 size_t alloc_size;
3283 int i;
3284 int err;
3285
3286 alloc_size = sizeof(*nh_grp) +
3287 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3288 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3289 if (!nh_grp)
3290 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003291 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003292 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003293 nh_grp->neigh_tbl = &arp_tbl;
3294
Petr Machata9b014512017-09-02 23:49:20 +02003295 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003296 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003297 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003298 for (i = 0; i < nh_grp->count; i++) {
3299 nh = &nh_grp->nexthops[i];
3300 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003301 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003302 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003303 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003304 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003305 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3306 if (err)
3307 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003308 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3309 return nh_grp;
3310
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003311err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003312err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003313 for (i--; i >= 0; i--) {
3314 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003315 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003316 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003317 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003318 kfree(nh_grp);
3319 return ERR_PTR(err);
3320}
3321
3322static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003323mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3324 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003325{
3326 struct mlxsw_sp_nexthop *nh;
3327 int i;
3328
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003329 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003330 for (i = 0; i < nh_grp->count; i++) {
3331 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003332 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003333 }
Ido Schimmel58312122016-12-23 09:32:50 +01003334 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3335 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003336 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003337 kfree(nh_grp);
3338}
3339
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003340static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3341 struct mlxsw_sp_fib_entry *fib_entry,
3342 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003343{
3344 struct mlxsw_sp_nexthop_group *nh_grp;
3345
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003346 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003347 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003348 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003349 if (IS_ERR(nh_grp))
3350 return PTR_ERR(nh_grp);
3351 }
3352 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3353 fib_entry->nh_group = nh_grp;
3354 return 0;
3355}
3356
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003357static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3358 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359{
3360 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3361
3362 list_del(&fib_entry->nexthop_group_node);
3363 if (!list_empty(&nh_grp->fib_list))
3364 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003365 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003366}
3367
Ido Schimmel013b20f2017-02-08 11:16:36 +01003368static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003369mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3370{
3371 struct mlxsw_sp_fib4_entry *fib4_entry;
3372
3373 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3374 common);
3375 return !fib4_entry->tos;
3376}
3377
3378static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003379mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3380{
3381 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3382
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003383 switch (fib_entry->fib_node->fib->proto) {
3384 case MLXSW_SP_L3_PROTO_IPV4:
3385 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3386 return false;
3387 break;
3388 case MLXSW_SP_L3_PROTO_IPV6:
3389 break;
3390 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003391
Ido Schimmel013b20f2017-02-08 11:16:36 +01003392 switch (fib_entry->type) {
3393 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3394 return !!nh_group->adj_index_valid;
3395 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003396 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003397 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3398 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003399 default:
3400 return false;
3401 }
3402}
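/* Editorial note: in practice a remote (gateway) route is reported as
 * offloaded only once its nexthop group holds a valid adjacency index, a
 * local route only once it is bound to a RIF, and an IP-in-IP decap entry
 * is always considered offloaded. IPv4 entries with a non-zero TOS are
 * never reported as offloaded.
 */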
3403
Ido Schimmel428b8512017-08-03 13:28:28 +02003404static struct mlxsw_sp_nexthop *
3405mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3406 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3407{
3408 int i;
3409
3410 for (i = 0; i < nh_grp->count; i++) {
3411 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3412 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3413
3414 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3415 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3416 &rt->rt6i_gateway))
3417 return nh;
3418 continue;
3419 }
3420
3421 return NULL;
3422}
3423
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003424static void
3425mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3426{
3427 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3428 int i;
3429
Petr Machata4607f6d2017-09-02 23:49:25 +02003430 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3431 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003432 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3433 return;
3434 }
3435
3436 for (i = 0; i < nh_grp->count; i++) {
3437 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3438
3439 if (nh->offloaded)
3440 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3441 else
3442 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3443 }
3444}
3445
3446static void
3447mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3448{
3449 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3450 int i;
3451
3452 for (i = 0; i < nh_grp->count; i++) {
3453 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3454
3455 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3456 }
3457}
3458
Ido Schimmel428b8512017-08-03 13:28:28 +02003459static void
3460mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3461{
3462 struct mlxsw_sp_fib6_entry *fib6_entry;
3463 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3464
3465 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3466 common);
3467
3468 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3469 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003470 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003471 return;
3472 }
3473
3474 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3475 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3476 struct mlxsw_sp_nexthop *nh;
3477
3478 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3479 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003480 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003481 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003482 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003483 }
3484}
3485
3486static void
3487mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3488{
3489 struct mlxsw_sp_fib6_entry *fib6_entry;
3490 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3491
3492 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3493 common);
3494 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3495 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3496
Ido Schimmelfe400792017-08-15 09:09:49 +02003497 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003498 }
3499}
3500
Ido Schimmel013b20f2017-02-08 11:16:36 +01003501static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3502{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003503 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003504 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003505 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003506 break;
3507 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003508 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3509 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003510 }
3511}
3512
3513static void
3514mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3515{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003516 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003517 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003518 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003519 break;
3520 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003521 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3522 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003523 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003524}
3525
3526static void
3527mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3528 enum mlxsw_reg_ralue_op op, int err)
3529{
3530 switch (op) {
3531 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003532 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3533 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3534 if (err)
3535 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003536 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003537 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003538 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003539 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3540 return;
3541 default:
3542 return;
3543 }
3544}
3545
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003546static void
3547mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3548 const struct mlxsw_sp_fib_entry *fib_entry,
3549 enum mlxsw_reg_ralue_op op)
3550{
3551 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3552 enum mlxsw_reg_ralxx_protocol proto;
3553 u32 *p_dip;
3554
3555 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3556
3557 switch (fib->proto) {
3558 case MLXSW_SP_L3_PROTO_IPV4:
3559 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3560 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3561 fib_entry->fib_node->key.prefix_len,
3562 *p_dip);
3563 break;
3564 case MLXSW_SP_L3_PROTO_IPV6:
3565 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3566 fib_entry->fib_node->key.prefix_len,
3567 fib_entry->fib_node->key.addr);
3568 break;
3569 }
3570}
3571
3572static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3573 struct mlxsw_sp_fib_entry *fib_entry,
3574 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003575{
3576 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003577 enum mlxsw_reg_ralue_trap_action trap_action;
3578 u16 trap_id = 0;
3579 u32 adjacency_index = 0;
3580 u16 ecmp_size = 0;
3581
3582 /* In case the nexthop group adjacency index is valid, use it
 3583	 * with the provided ECMP size. Otherwise, set up a trap and pass
 3584	 * traffic to the kernel.
3585 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003586 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003587 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3588 adjacency_index = fib_entry->nh_group->adj_index;
3589 ecmp_size = fib_entry->nh_group->ecmp_size;
3590 } else {
3591 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3592 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3593 }
3594
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003595 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003596 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3597 adjacency_index, ecmp_size);
3598 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3599}
3600
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003601static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3602 struct mlxsw_sp_fib_entry *fib_entry,
3603 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003604{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003605 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003606 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003607 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003608 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003609 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003610
3611 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3612 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003613 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003614 } else {
3615 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3616 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3617 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003618
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003619 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003620 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3621 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003622 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3623}
3624
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003625static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3626 struct mlxsw_sp_fib_entry *fib_entry,
3627 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003628{
3629 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003630
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003631 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003632 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3633 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3634}
3635
Petr Machata4607f6d2017-09-02 23:49:25 +02003636static int
3637mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3638 struct mlxsw_sp_fib_entry *fib_entry,
3639 enum mlxsw_reg_ralue_op op)
3640{
3641 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3642 const struct mlxsw_sp_ipip_ops *ipip_ops;
3643
3644 if (WARN_ON(!ipip_entry))
3645 return -EINVAL;
3646
3647 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3648 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3649 fib_entry->decap.tunnel_index);
3650}
3651
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003652static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3653 struct mlxsw_sp_fib_entry *fib_entry,
3654 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003655{
3656 switch (fib_entry->type) {
3657 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003658 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003659 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003660 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003661 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003662 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003663 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3664 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3665 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003666 }
3667 return -EINVAL;
3668}
3669
3670static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3671 struct mlxsw_sp_fib_entry *fib_entry,
3672 enum mlxsw_reg_ralue_op op)
3673{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003674 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003675
Ido Schimmel013b20f2017-02-08 11:16:36 +01003676 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003677
Ido Schimmel013b20f2017-02-08 11:16:36 +01003678 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003679}
3680
3681static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3682 struct mlxsw_sp_fib_entry *fib_entry)
3683{
Jiri Pirko7146da32016-09-01 10:37:41 +02003684 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3685 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003686}
3687
3688static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3689 struct mlxsw_sp_fib_entry *fib_entry)
3690{
3691 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3692 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3693}
3694
Jiri Pirko61c503f2016-07-04 08:23:11 +02003695static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003696mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3697 const struct fib_entry_notifier_info *fen_info,
3698 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003699{
Petr Machata4607f6d2017-09-02 23:49:25 +02003700 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3701 struct net_device *dev = fen_info->fi->fib_dev;
3702 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003703 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003704
Ido Schimmel97989ee2017-03-10 08:53:38 +01003705 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003706 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003707 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3708 MLXSW_SP_L3_PROTO_IPV4, dip);
3709 if (ipip_entry) {
3710 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3711 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3712 fib_entry,
3713 ipip_entry);
3714 }
3715 /* fall through */
3716 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003717 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3718 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003719 case RTN_UNREACHABLE: /* fall through */
3720 case RTN_BLACKHOLE: /* fall through */
3721 case RTN_PROHIBIT:
3722 /* Packets hitting these routes need to be trapped, but
3723 * can do so with a lower priority than packets directed
3724 * at the host, so use action type local instead of trap.
3725 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003726 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003727 return 0;
3728 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003729 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003730 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003731 else
3732 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003733 return 0;
3734 default:
3735 return -EINVAL;
3736 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003737}
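/* Editorial note: the mapping chosen above is, roughly:
 *   RTN_LOCAL      - IP-in-IP decap entry when the address terminates a
 *                    tunnel, otherwise trap to the CPU (as for broadcast)
 *   RTN_BROADCAST  - trap to the CPU
 *   RTN_UNREACHABLE, RTN_BLACKHOLE, RTN_PROHIBIT - "local" action, i.e. a
 *                    lower-priority trap
 *   RTN_UNICAST    - remote (adjacency) entry for gateway routes,
 *                    otherwise a "local" entry
 * Any other route type is rejected with -EINVAL.
 */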
3738
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003739static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003740mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3741 struct mlxsw_sp_fib_node *fib_node,
3742 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003743{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003744 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003745 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003746 int err;
3747
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003748 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3749 if (!fib4_entry)
3750 return ERR_PTR(-ENOMEM);
3751 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003752
3753 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3754 if (err)
3755 goto err_fib4_entry_type_set;
3756
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003757 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003758 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003759 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003760
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003761 fib4_entry->prio = fen_info->fi->fib_priority;
3762 fib4_entry->tb_id = fen_info->tb_id;
3763 fib4_entry->type = fen_info->type;
3764 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003765
3766 fib_entry->fib_node = fib_node;
3767
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003768 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003769
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003770err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003771err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003772 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003773 return ERR_PTR(err);
3774}
3775
3776static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003777 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003778{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003779 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003780 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003781}
3782
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003783static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003784mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3785 const struct fib_entry_notifier_info *fen_info)
3786{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003787 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003788 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003789 struct mlxsw_sp_fib *fib;
3790 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003791
Ido Schimmel160e22a2017-07-18 10:10:20 +02003792 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3793 if (!vr)
3794 return NULL;
3795 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3796
3797 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3798 sizeof(fen_info->dst),
3799 fen_info->dst_len);
3800 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003801 return NULL;
3802
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003803 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3804 if (fib4_entry->tb_id == fen_info->tb_id &&
3805 fib4_entry->tos == fen_info->tos &&
3806 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003807 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3808 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003809 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003810 }
3811 }
3812
3813 return NULL;
3814}
3815
3816static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3817 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3818 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3819 .key_len = sizeof(struct mlxsw_sp_fib_key),
3820 .automatic_shrinking = true,
3821};
3822
3823static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3824 struct mlxsw_sp_fib_node *fib_node)
3825{
3826 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3827 mlxsw_sp_fib_ht_params);
3828}
3829
3830static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3831 struct mlxsw_sp_fib_node *fib_node)
3832{
3833 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3834 mlxsw_sp_fib_ht_params);
3835}
3836
3837static struct mlxsw_sp_fib_node *
3838mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3839 size_t addr_len, unsigned char prefix_len)
3840{
3841 struct mlxsw_sp_fib_key key;
3842
3843 memset(&key, 0, sizeof(key));
3844 memcpy(key.addr, addr, addr_len);
3845 key.prefix_len = prefix_len;
3846 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3847}
3848
3849static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003850mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003851 size_t addr_len, unsigned char prefix_len)
3852{
3853 struct mlxsw_sp_fib_node *fib_node;
3854
3855 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3856 if (!fib_node)
3857 return NULL;
3858
3859 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003860 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003861 memcpy(fib_node->key.addr, addr, addr_len);
3862 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003863
3864 return fib_node;
3865}
3866
3867static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3868{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003869 list_del(&fib_node->list);
3870 WARN_ON(!list_empty(&fib_node->entry_list));
3871 kfree(fib_node);
3872}
3873
3874static bool
3875mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3876 const struct mlxsw_sp_fib_entry *fib_entry)
3877{
3878 return list_first_entry(&fib_node->entry_list,
3879 struct mlxsw_sp_fib_entry, list) == fib_entry;
3880}
3881
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003882static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3883 struct mlxsw_sp_fib *fib,
3884 struct mlxsw_sp_fib_node *fib_node)
3885{
3886 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3887 struct mlxsw_sp_lpm_tree *lpm_tree;
3888 int err;
3889
3890 /* Since the tree is shared between all virtual routers we must
3891 * make sure it contains all the required prefix lengths. This
3892 * can be computed by either adding the new prefix length to the
3893 * existing prefix usage of a bound tree, or by aggregating the
3894 * prefix lengths across all virtual routers and adding the new
3895 * one as well.
3896 */
3897 if (fib->lpm_tree)
3898 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3899 &fib->lpm_tree->prefix_usage);
3900 else
3901 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3902 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3903
3904 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3905 fib->proto);
3906 if (IS_ERR(lpm_tree))
3907 return PTR_ERR(lpm_tree);
3908
3909 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3910 return 0;
3911
3912 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3913 if (err)
3914 return err;
3915
3916 return 0;
3917}
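/* Illustrative example (editorial addition; the prefix lengths are
 * hypothetical): if the tree currently bound to this FIB covers prefix
 * lengths {0, 16, 24} and a first /20 node is added, a tree covering
 * {0, 16, 20, 24} is requested. When that resolves to a different LPM
 * tree, mlxsw_sp_vrs_lpm_tree_replace() re-binds the virtual routers to
 * the new tree; if the same tree is returned, nothing needs to change.
 */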
3918
3919static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3920 struct mlxsw_sp_fib *fib)
3921{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003922 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3923 return;
3924 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3925 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3926 fib->lpm_tree = NULL;
3927}
3928
Ido Schimmel9aecce12017-02-09 10:28:42 +01003929static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3930{
3931 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003932 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003933
3934 if (fib->prefix_ref_count[prefix_len]++ == 0)
3935 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3936}
3937
3938static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3939{
3940 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003941 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003942
3943 if (--fib->prefix_ref_count[prefix_len] == 0)
3944 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3945}
3946
Ido Schimmel76610eb2017-03-10 08:53:41 +01003947static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3948 struct mlxsw_sp_fib_node *fib_node,
3949 struct mlxsw_sp_fib *fib)
3950{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003951 int err;
3952
3953 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3954 if (err)
3955 return err;
3956 fib_node->fib = fib;
3957
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003958 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3959 if (err)
3960 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003961
3962 mlxsw_sp_fib_node_prefix_inc(fib_node);
3963
3964 return 0;
3965
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003966err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01003967 fib_node->fib = NULL;
3968 mlxsw_sp_fib_node_remove(fib, fib_node);
3969 return err;
3970}
3971
3972static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
3973 struct mlxsw_sp_fib_node *fib_node)
3974{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003975 struct mlxsw_sp_fib *fib = fib_node->fib;
3976
3977 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003978 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003979 fib_node->fib = NULL;
3980 mlxsw_sp_fib_node_remove(fib, fib_node);
3981}
3982
Ido Schimmel9aecce12017-02-09 10:28:42 +01003983static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003984mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
3985 size_t addr_len, unsigned char prefix_len,
3986 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003987{
3988 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003989 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02003990 struct mlxsw_sp_vr *vr;
3991 int err;
3992
David Ahernf8fa9b42017-10-18 09:56:56 -07003993 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02003994 if (IS_ERR(vr))
3995 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003996 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02003997
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003998 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003999 if (fib_node)
4000 return fib_node;
4001
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004002 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004003 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004004 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004005 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004006 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004007
Ido Schimmel76610eb2017-03-10 08:53:41 +01004008 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4009 if (err)
4010 goto err_fib_node_init;
4011
Ido Schimmel9aecce12017-02-09 10:28:42 +01004012 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004013
Ido Schimmel76610eb2017-03-10 08:53:41 +01004014err_fib_node_init:
4015 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004016err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004017 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004018 return ERR_PTR(err);
4019}
4020
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004021static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4022 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004023{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004024 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004025
Ido Schimmel9aecce12017-02-09 10:28:42 +01004026 if (!list_empty(&fib_node->entry_list))
4027 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004028 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004029 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004030 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004031}
4032
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004033static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004034mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004035 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004036{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004037 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004038
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004039 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4040 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004041 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004042 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004043 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004044 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004045 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004046 if (fib4_entry->prio >= new4_entry->prio ||
4047 fib4_entry->tos < new4_entry->tos)
4048 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004049 }
4050
4051 return NULL;
4052}
4053
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004054static int
4055mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4056 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004057{
4058 struct mlxsw_sp_fib_node *fib_node;
4059
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004060 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004061 return -EINVAL;
4062
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004063 fib_node = fib4_entry->common.fib_node;
4064 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4065 common.list) {
4066 if (fib4_entry->tb_id != new4_entry->tb_id ||
4067 fib4_entry->tos != new4_entry->tos ||
4068 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004069 break;
4070 }
4071
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004072 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004073 return 0;
4074}
4075
Ido Schimmel9aecce12017-02-09 10:28:42 +01004076static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004077mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004078 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004080 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004081 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004082
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004083 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004084
Ido Schimmel4283bce2017-02-09 10:28:43 +01004085 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004086 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4087 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004088 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004089
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004090	 /* Insert the new entry before the replaced one, so that the
 4091	 * latter can later be removed.
4092 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004093 if (fib4_entry) {
4094 list_add_tail(&new4_entry->common.list,
4095 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004096 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004097 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004098
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004099 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4100 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004101 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004102 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004103 }
4104
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004105 if (fib4_entry)
4106 list_add(&new4_entry->common.list,
4107 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004108 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004109 list_add(&new4_entry->common.list,
4110 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004111 }
4112
4113 return 0;
4114}
4115
4116static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004117mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004119 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004120}
4121
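/* Only the first entry in a fib_node's list is reflected in the
 * device's LPM table; the entries behind it are shadowed. The two
 * helpers below keep the hardware in sync when the head of the list
 * changes: on add, the new head overwrites the previously offloaded
 * entry, and on delete, the next entry is promoted in its place.
 */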
Ido Schimmel80c238f2017-07-18 10:10:29 +02004122static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4123 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004124{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004125 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4126
Ido Schimmel9aecce12017-02-09 10:28:42 +01004127 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4128 return 0;
4129
4130 /* To prevent packet loss, overwrite the previously offloaded
4131 * entry.
4132 */
4133 if (!list_is_singular(&fib_node->entry_list)) {
4134 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4135 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4136
4137 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4138 }
4139
4140 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4141}
4142
Ido Schimmel80c238f2017-07-18 10:10:29 +02004143static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4144 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004145{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004146 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4147
Ido Schimmel9aecce12017-02-09 10:28:42 +01004148 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4149 return;
4150
4151 /* Promote the next entry by overwriting the deleted entry */
4152 if (!list_is_singular(&fib_node->entry_list)) {
4153 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4154 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4155
4156 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4157 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4158 return;
4159 }
4160
4161 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4162}
4163
4164static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004165 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004166 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004167{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004168 int err;
4169
Ido Schimmel9efbee62017-07-18 10:10:28 +02004170 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004171 if (err)
4172 return err;
4173
Ido Schimmel80c238f2017-07-18 10:10:29 +02004174 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004175 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004176 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004177
Ido Schimmel9aecce12017-02-09 10:28:42 +01004178 return 0;
4179
Ido Schimmel80c238f2017-07-18 10:10:29 +02004180err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004181 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004182 return err;
4183}
4184
4185static void
4186mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004187 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004188{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004189 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004190 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004191
4192 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4193 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004194}
4195
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004196static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004197 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004198 bool replace)
4199{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004200 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4201 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004202
4203 if (!replace)
4204 return;
4205
4206 /* We inserted the new entry before replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004207 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004208
4209 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4210 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004211 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004212}
4213
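/* Roughly, an IPv4 route add proceeds in three steps: get (or create)
 * the fib_node for the prefix, create a fib4_entry for the route, and
 * link the entry into the node's list, which also programs it to the
 * device if it became the new head. In replace mode the old entry is
 * then unlinked and destroyed.
 */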
Ido Schimmel9aecce12017-02-09 10:28:42 +01004214static int
4215mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004216 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004217 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004218{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004219 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004220 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004221 int err;
4222
Ido Schimmel9011b672017-05-16 19:38:25 +02004223 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004224 return 0;
4225
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004226 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4227 &fen_info->dst, sizeof(fen_info->dst),
4228 fen_info->dst_len,
4229 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004230 if (IS_ERR(fib_node)) {
4231 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4232 return PTR_ERR(fib_node);
4233 }
4234
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004235 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4236 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004237 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004238 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004239 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004240 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004241
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004242 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004243 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004244 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004245 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4246 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004247 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004248
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004249 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004250
Jiri Pirko61c503f2016-07-04 08:23:11 +02004251 return 0;
4252
Ido Schimmel9aecce12017-02-09 10:28:42 +01004253err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004254 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004255err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004256 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004257 return err;
4258}
4259
Jiri Pirko37956d72016-10-20 16:05:43 +02004260static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4261 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004262{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004263 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004264 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004265
Ido Schimmel9011b672017-05-16 19:38:25 +02004266 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004267 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004268
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004269 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4270 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004271 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004273
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004274 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4275 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004276 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004277}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004278
Ido Schimmel428b8512017-08-03 13:28:28 +02004279static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4280{
4281	/* Packets with a link-local destination IP arriving at the router
4282	 * are trapped to the CPU, so there is no need to program specific
4283	 * routes for them.
4284 */
4285 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4286 return true;
4287
4288 /* Multicast routes aren't supported, so ignore them. Neighbour
4289 * Discovery packets are specifically trapped.
4290 */
4291 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4292 return true;
4293
4294 /* Cloned routes are irrelevant in the forwarding path. */
4295 if (rt->rt6i_flags & RTF_CACHE)
4296 return true;
4297
4298 return false;
4299}
4300
4301static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4302{
4303 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4304
4305 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4306 if (!mlxsw_sp_rt6)
4307 return ERR_PTR(-ENOMEM);
4308
4309	/* In case of a route replace, the replaced route is deleted
4310	 * without notification. Take a reference to prevent accessing
4311	 * freed memory.
4312 */
4313 mlxsw_sp_rt6->rt = rt;
4314 rt6_hold(rt);
4315
4316 return mlxsw_sp_rt6;
4317}
4318
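/* rt6_release() is only available when IPv6 support is built in, so a
 * no-op stub is presumably needed here to keep the common cleanup path
 * compilable with CONFIG_IPV6 disabled.
 */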
4319#if IS_ENABLED(CONFIG_IPV6)
4320static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4321{
4322 rt6_release(rt);
4323}
4324#else
4325static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4326{
4327}
4328#endif
4329
4330static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4331{
4332 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4333 kfree(mlxsw_sp_rt6);
4334}
4335
4336static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4337{
4338 /* RTF_CACHE routes are ignored */
4339 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4340}
4341
4342static struct rt6_info *
4343mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4344{
4345 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4346 list)->rt;
4347}
4348
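/* IPv6 multipath routes are notified as individual rt6_info siblings.
 * The lookup below tries to find an existing entry with the same table
 * and metric that can absorb a new gateway route as an additional
 * nexthop, instead of creating a separate entry for it.
 */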
4349static struct mlxsw_sp_fib6_entry *
4350mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004351 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004352{
4353 struct mlxsw_sp_fib6_entry *fib6_entry;
4354
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004355 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004356 return NULL;
4357
4358 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4359 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4360
4361 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4362 * virtual router.
4363 */
4364 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4365 continue;
4366 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4367 break;
4368 if (rt->rt6i_metric < nrt->rt6i_metric)
4369 continue;
4370 if (rt->rt6i_metric == nrt->rt6i_metric &&
4371 mlxsw_sp_fib6_rt_can_mp(rt))
4372 return fib6_entry;
4373 if (rt->rt6i_metric > nrt->rt6i_metric)
4374 break;
4375 }
4376
4377 return NULL;
4378}
4379
4380static struct mlxsw_sp_rt6 *
4381mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4382 const struct rt6_info *rt)
4383{
4384 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4385
4386 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4387 if (mlxsw_sp_rt6->rt == rt)
4388 return mlxsw_sp_rt6;
4389 }
4390
4391 return NULL;
4392}
4393
Petr Machata8f28a302017-09-02 23:49:24 +02004394static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4395 const struct rt6_info *rt,
4396 enum mlxsw_sp_ipip_type *ret)
4397{
4398 return rt->dst.dev &&
4399 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4400}
4401
Petr Machata35225e42017-09-02 23:49:22 +02004402static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4403 struct mlxsw_sp_nexthop_group *nh_grp,
4404 struct mlxsw_sp_nexthop *nh,
4405 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004406{
Petr Machata8f28a302017-09-02 23:49:24 +02004407 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004408 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004409 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004410 struct mlxsw_sp_rif *rif;
4411 int err;
4412
Petr Machata8f28a302017-09-02 23:49:24 +02004413 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4414 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4415 MLXSW_SP_L3_PROTO_IPV6)) {
4416 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004417 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004418 if (err)
4419 return err;
4420 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4421 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004422 }
4423
Petr Machata35225e42017-09-02 23:49:22 +02004424 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004425 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4426 if (!rif)
4427 return 0;
4428 mlxsw_sp_nexthop_rif_init(nh, rif);
4429
4430 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4431 if (err)
4432 goto err_nexthop_neigh_init;
4433
4434 return 0;
4435
4436err_nexthop_neigh_init:
4437 mlxsw_sp_nexthop_rif_fini(nh);
4438 return err;
4439}
4440
Petr Machata35225e42017-09-02 23:49:22 +02004441static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4442 struct mlxsw_sp_nexthop *nh)
4443{
4444 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4445}
4446
4447static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4448 struct mlxsw_sp_nexthop_group *nh_grp,
4449 struct mlxsw_sp_nexthop *nh,
4450 const struct rt6_info *rt)
4451{
4452 struct net_device *dev = rt->dst.dev;
4453
4454 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004455 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004456 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004457 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004458
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004459 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4460
Petr Machata35225e42017-09-02 23:49:22 +02004461 if (!dev)
4462 return 0;
4463 nh->ifindex = dev->ifindex;
4464
4465 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4466}
4467
Ido Schimmel428b8512017-08-03 13:28:28 +02004468static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4469 struct mlxsw_sp_nexthop *nh)
4470{
Petr Machata35225e42017-09-02 23:49:22 +02004471 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004472 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004473 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004474}
4475
Petr Machataf6050ee2017-09-02 23:49:21 +02004476static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4477 const struct rt6_info *rt)
4478{
Petr Machata8f28a302017-09-02 23:49:24 +02004479 return rt->rt6i_flags & RTF_GATEWAY ||
4480 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004481}
4482
Ido Schimmel428b8512017-08-03 13:28:28 +02004483static struct mlxsw_sp_nexthop_group *
4484mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4485 struct mlxsw_sp_fib6_entry *fib6_entry)
4486{
4487 struct mlxsw_sp_nexthop_group *nh_grp;
4488 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4489 struct mlxsw_sp_nexthop *nh;
4490 size_t alloc_size;
4491 int i = 0;
4492 int err;
4493
4494 alloc_size = sizeof(*nh_grp) +
4495 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4496 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4497 if (!nh_grp)
4498 return ERR_PTR(-ENOMEM);
4499 INIT_LIST_HEAD(&nh_grp->fib_list);
4500#if IS_ENABLED(CONFIG_IPV6)
4501 nh_grp->neigh_tbl = &nd_tbl;
4502#endif
4503 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4504 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004505 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004506 nh_grp->count = fib6_entry->nrt6;
4507 for (i = 0; i < nh_grp->count; i++) {
4508 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4509
4510 nh = &nh_grp->nexthops[i];
4511 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4512 if (err)
4513 goto err_nexthop6_init;
4514 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4515 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004516
4517 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4518 if (err)
4519 goto err_nexthop_group_insert;
4520
Ido Schimmel428b8512017-08-03 13:28:28 +02004521 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4522 return nh_grp;
4523
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004524err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004525err_nexthop6_init:
4526 for (i--; i >= 0; i--) {
4527 nh = &nh_grp->nexthops[i];
4528 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4529 }
4530 kfree(nh_grp);
4531 return ERR_PTR(err);
4532}
4533
4534static void
4535mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4536 struct mlxsw_sp_nexthop_group *nh_grp)
4537{
4538 struct mlxsw_sp_nexthop *nh;
4539 int i = nh_grp->count;
4540
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004541 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004542 for (i--; i >= 0; i--) {
4543 nh = &nh_grp->nexthops[i];
4544 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4545 }
4546 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4547 WARN_ON(nh_grp->adj_index_valid);
4548 kfree(nh_grp);
4549}
4550
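/* Nexthop groups are shared between FIB entries: _get() first looks up
 * an existing group matching the entry's nexthops and only creates a
 * new one on a miss, while _put() destroys the group once no FIB entry
 * references it any more, with nh_grp->fib_list acting as an implicit
 * reference count.
 */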
4551static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4552 struct mlxsw_sp_fib6_entry *fib6_entry)
4553{
4554 struct mlxsw_sp_nexthop_group *nh_grp;
4555
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004556 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4557 if (!nh_grp) {
4558 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4559 if (IS_ERR(nh_grp))
4560 return PTR_ERR(nh_grp);
4561 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004562
4563 list_add_tail(&fib6_entry->common.nexthop_group_node,
4564 &nh_grp->fib_list);
4565 fib6_entry->common.nh_group = nh_grp;
4566
4567 return 0;
4568}
4569
4570static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4571 struct mlxsw_sp_fib_entry *fib_entry)
4572{
4573 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4574
4575 list_del(&fib_entry->nexthop_group_node);
4576 if (!list_empty(&nh_grp->fib_list))
4577 return;
4578 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4579}
4580
4581static int
4582mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4583 struct mlxsw_sp_fib6_entry *fib6_entry)
4584{
4585 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4586 int err;
4587
4588 fib6_entry->common.nh_group = NULL;
4589 list_del(&fib6_entry->common.nexthop_group_node);
4590
4591 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4592 if (err)
4593 goto err_nexthop6_group_get;
4594
4595	/* If this entry is offloaded, then the adjacency index
4596 * currently associated with it in the device's table is that
4597 * of the old group. Start using the new one instead.
4598 */
4599 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4600 if (err)
4601 goto err_fib_node_entry_add;
4602
4603 if (list_empty(&old_nh_grp->fib_list))
4604 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4605
4606 return 0;
4607
4608err_fib_node_entry_add:
4609 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4610err_nexthop6_group_get:
4611 list_add_tail(&fib6_entry->common.nexthop_group_node,
4612 &old_nh_grp->fib_list);
4613 fib6_entry->common.nh_group = old_nh_grp;
4614 return err;
4615}
4616
4617static int
4618mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4619 struct mlxsw_sp_fib6_entry *fib6_entry,
4620 struct rt6_info *rt)
4621{
4622 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4623 int err;
4624
4625 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4626 if (IS_ERR(mlxsw_sp_rt6))
4627 return PTR_ERR(mlxsw_sp_rt6);
4628
4629 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4630 fib6_entry->nrt6++;
4631
4632 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4633 if (err)
4634 goto err_nexthop6_group_update;
4635
4636 return 0;
4637
4638err_nexthop6_group_update:
4639 fib6_entry->nrt6--;
4640 list_del(&mlxsw_sp_rt6->list);
4641 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4642 return err;
4643}
4644
4645static void
4646mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4647 struct mlxsw_sp_fib6_entry *fib6_entry,
4648 struct rt6_info *rt)
4649{
4650 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4651
4652 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4653 if (WARN_ON(!mlxsw_sp_rt6))
4654 return;
4655
4656 fib6_entry->nrt6--;
4657 list_del(&mlxsw_sp_rt6->list);
4658 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4659 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4660}
4661
Petr Machataf6050ee2017-09-02 23:49:21 +02004662static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4663 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004664 const struct rt6_info *rt)
4665{
4666 /* Packets hitting RTF_REJECT routes need to be discarded by the
4667 * stack. We can rely on their destination device not having a
4668 * RIF (it's the loopback device) and can thus use action type
4669 * local, which will cause them to be trapped with a lower
4670 * priority than packets that need to be locally received.
4671 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004672 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004673 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4674 else if (rt->rt6i_flags & RTF_REJECT)
4675 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004676 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004677 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4678 else
4679 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4680}
4681
4682static void
4683mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4684{
4685 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4686
4687 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4688 list) {
4689 fib6_entry->nrt6--;
4690 list_del(&mlxsw_sp_rt6->list);
4691 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4692 }
4693}
4694
4695static struct mlxsw_sp_fib6_entry *
4696mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4697 struct mlxsw_sp_fib_node *fib_node,
4698 struct rt6_info *rt)
4699{
4700 struct mlxsw_sp_fib6_entry *fib6_entry;
4701 struct mlxsw_sp_fib_entry *fib_entry;
4702 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4703 int err;
4704
4705 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4706 if (!fib6_entry)
4707 return ERR_PTR(-ENOMEM);
4708 fib_entry = &fib6_entry->common;
4709
4710 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4711 if (IS_ERR(mlxsw_sp_rt6)) {
4712 err = PTR_ERR(mlxsw_sp_rt6);
4713 goto err_rt6_create;
4714 }
4715
Petr Machataf6050ee2017-09-02 23:49:21 +02004716 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004717
4718 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4719 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4720 fib6_entry->nrt6 = 1;
4721 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4722 if (err)
4723 goto err_nexthop6_group_get;
4724
4725 fib_entry->fib_node = fib_node;
4726
4727 return fib6_entry;
4728
4729err_nexthop6_group_get:
4730 list_del(&mlxsw_sp_rt6->list);
4731 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4732err_rt6_create:
4733 kfree(fib6_entry);
4734 return ERR_PTR(err);
4735}
4736
4737static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4738 struct mlxsw_sp_fib6_entry *fib6_entry)
4739{
4740 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4741 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4742 WARN_ON(fib6_entry->nrt6);
4743 kfree(fib6_entry);
4744}
4745
4746static struct mlxsw_sp_fib6_entry *
4747mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004748 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004749{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004750 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004751
4752 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4753 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4754
4755 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4756 continue;
4757 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4758 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004759 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4760 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4761 mlxsw_sp_fib6_rt_can_mp(nrt))
4762 return fib6_entry;
4763 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4764 fallback = fallback ?: fib6_entry;
4765 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004766 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004767 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004768 }
4769
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004770 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004771}
4772
4773static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004774mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4775 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004776{
4777 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4778 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4779 struct mlxsw_sp_fib6_entry *fib6_entry;
4780
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004781 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4782
4783 if (replace && WARN_ON(!fib6_entry))
4784 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004785
4786 if (fib6_entry) {
4787 list_add_tail(&new6_entry->common.list,
4788 &fib6_entry->common.list);
4789 } else {
4790 struct mlxsw_sp_fib6_entry *last;
4791
4792 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4793 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4794
4795 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4796 break;
4797 fib6_entry = last;
4798 }
4799
4800 if (fib6_entry)
4801 list_add(&new6_entry->common.list,
4802 &fib6_entry->common.list);
4803 else
4804 list_add(&new6_entry->common.list,
4805 &fib_node->entry_list);
4806 }
4807
4808 return 0;
4809}
4810
4811static void
4812mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4813{
4814 list_del(&fib6_entry->common.list);
4815}
4816
4817static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004818 struct mlxsw_sp_fib6_entry *fib6_entry,
4819 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004820{
4821 int err;
4822
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004823 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004824 if (err)
4825 return err;
4826
4827 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4828 if (err)
4829 goto err_fib_node_entry_add;
4830
4831 return 0;
4832
4833err_fib_node_entry_add:
4834 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4835 return err;
4836}
4837
4838static void
4839mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4840 struct mlxsw_sp_fib6_entry *fib6_entry)
4841{
4842 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4843 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4844}
4845
4846static struct mlxsw_sp_fib6_entry *
4847mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4848 const struct rt6_info *rt)
4849{
4850 struct mlxsw_sp_fib6_entry *fib6_entry;
4851 struct mlxsw_sp_fib_node *fib_node;
4852 struct mlxsw_sp_fib *fib;
4853 struct mlxsw_sp_vr *vr;
4854
4855 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4856 if (!vr)
4857 return NULL;
4858 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4859
4860 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4861 sizeof(rt->rt6i_dst.addr),
4862 rt->rt6i_dst.plen);
4863 if (!fib_node)
4864 return NULL;
4865
4866 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4867 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4868
4869 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4870 rt->rt6i_metric == iter_rt->rt6i_metric &&
4871 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4872 return fib6_entry;
4873 }
4874
4875 return NULL;
4876}
4877
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004878static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4879 struct mlxsw_sp_fib6_entry *fib6_entry,
4880 bool replace)
4881{
4882 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4883 struct mlxsw_sp_fib6_entry *replaced;
4884
4885 if (!replace)
4886 return;
4887
4888 replaced = list_next_entry(fib6_entry, common.list);
4889
4890 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4891 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4892 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4893}
4894
Ido Schimmel428b8512017-08-03 13:28:28 +02004895static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004896 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004897{
4898 struct mlxsw_sp_fib6_entry *fib6_entry;
4899 struct mlxsw_sp_fib_node *fib_node;
4900 int err;
4901
4902 if (mlxsw_sp->router->aborted)
4903 return 0;
4904
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004905 if (rt->rt6i_src.plen)
4906 return -EINVAL;
4907
Ido Schimmel428b8512017-08-03 13:28:28 +02004908 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4909 return 0;
4910
4911 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4912 &rt->rt6i_dst.addr,
4913 sizeof(rt->rt6i_dst.addr),
4914 rt->rt6i_dst.plen,
4915 MLXSW_SP_L3_PROTO_IPV6);
4916 if (IS_ERR(fib_node))
4917 return PTR_ERR(fib_node);
4918
4919 /* Before creating a new entry, try to append route to an existing
4920 * multipath entry.
4921 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004922 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004923 if (fib6_entry) {
4924 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4925 if (err)
4926 goto err_fib6_entry_nexthop_add;
4927 return 0;
4928 }
4929
4930 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4931 if (IS_ERR(fib6_entry)) {
4932 err = PTR_ERR(fib6_entry);
4933 goto err_fib6_entry_create;
4934 }
4935
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004936 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004937 if (err)
4938 goto err_fib6_node_entry_link;
4939
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004940 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4941
Ido Schimmel428b8512017-08-03 13:28:28 +02004942 return 0;
4943
4944err_fib6_node_entry_link:
4945 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4946err_fib6_entry_create:
4947err_fib6_entry_nexthop_add:
4948 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4949 return err;
4950}
4951
4952static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4953 struct rt6_info *rt)
4954{
4955 struct mlxsw_sp_fib6_entry *fib6_entry;
4956 struct mlxsw_sp_fib_node *fib_node;
4957
4958 if (mlxsw_sp->router->aborted)
4959 return;
4960
4961 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4962 return;
4963
4964 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4965 if (WARN_ON(!fib6_entry))
4966 return;
4967
4968	/* If the route is part of a multipath entry, but is not the last
4969	 * one to be removed, then only reduce its nexthop group.
4970 */
4971 if (!list_is_singular(&fib6_entry->rt6_list)) {
4972 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
4973 return;
4974 }
4975
4976 fib_node = fib6_entry->common.fib_node;
4977
4978 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
4979 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4980 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4981}
4982
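/* Abort mode: a minimal LPM tree is set up and a default (zero-length
 * prefix) entry with an IP2ME action is written for every virtual
 * router, so that all routed packets are trapped to the CPU and
 * forwarded in software rather than dropped.
 */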
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004983static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
4984 enum mlxsw_reg_ralxx_protocol proto,
4985 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004986{
4987 char ralta_pl[MLXSW_REG_RALTA_LEN];
4988 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01004989 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004990
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004991 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004992 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
4993 if (err)
4994 return err;
4995
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004996 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004997 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
4998 if (err)
4999 return err;
5000
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005001 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005002 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005003 char raltb_pl[MLXSW_REG_RALTB_LEN];
5004 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005005
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005006 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005007 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5008 raltb_pl);
5009 if (err)
5010 return err;
5011
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005012 mlxsw_reg_ralue_pack(ralue_pl, proto,
5013 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005014 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5015 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5016 ralue_pl);
5017 if (err)
5018 return err;
5019 }
5020
5021 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005022}
5023
Yotam Gigid42b0962017-09-27 08:23:20 +02005024static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5025 struct mfc_entry_notifier_info *men_info,
5026 bool replace)
5027{
5028 struct mlxsw_sp_vr *vr;
5029
5030 if (mlxsw_sp->router->aborted)
5031 return 0;
5032
David Ahernf8fa9b42017-10-18 09:56:56 -07005033 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005034 if (IS_ERR(vr))
5035 return PTR_ERR(vr);
5036
5037 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5038}
5039
5040static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5041 struct mfc_entry_notifier_info *men_info)
5042{
5043 struct mlxsw_sp_vr *vr;
5044
5045 if (mlxsw_sp->router->aborted)
5046 return;
5047
5048 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5049 if (WARN_ON(!vr))
5050 return;
5051
5052 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5053 mlxsw_sp_vr_put(vr);
5054}
5055
5056static int
5057mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5058 struct vif_entry_notifier_info *ven_info)
5059{
5060 struct mlxsw_sp_rif *rif;
5061 struct mlxsw_sp_vr *vr;
5062
5063 if (mlxsw_sp->router->aborted)
5064 return 0;
5065
David Ahernf8fa9b42017-10-18 09:56:56 -07005066 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005067 if (IS_ERR(vr))
5068 return PTR_ERR(vr);
5069
5070 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5071 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5072 ven_info->vif_index,
5073 ven_info->vif_flags, rif);
5074}
5075
5076static void
5077mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5078 struct vif_entry_notifier_info *ven_info)
5079{
5080 struct mlxsw_sp_vr *vr;
5081
5082 if (mlxsw_sp->router->aborted)
5083 return;
5084
5085 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5086 if (WARN_ON(!vr))
5087 return;
5088
5089 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5090 mlxsw_sp_vr_put(vr);
5091}
5092
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005093static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5094{
5095 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5096 int err;
5097
5098 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5099 MLXSW_SP_LPM_TREE_MIN);
5100 if (err)
5101 return err;
5102
Yotam Gigid42b0962017-09-27 08:23:20 +02005103	/* The multicast router code does not need an abort trap since, by
5104	 * default, packets that don't match any routes are trapped to the CPU.
5105 */
5106
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005107 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5108 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5109 MLXSW_SP_LPM_TREE_MIN + 1);
5110}
5111
Ido Schimmel9aecce12017-02-09 10:28:42 +01005112static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5113 struct mlxsw_sp_fib_node *fib_node)
5114{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005115 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005116
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005117 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5118 common.list) {
5119 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005120
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005121 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5122 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005123 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005124	/* Break when the entry list is empty and the node was freed.
5125	 * Otherwise, we'll access freed memory in the next
5126 * iteration.
5127 */
5128 if (do_break)
5129 break;
5130 }
5131}
5132
Ido Schimmel428b8512017-08-03 13:28:28 +02005133static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5134 struct mlxsw_sp_fib_node *fib_node)
5135{
5136 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5137
5138 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5139 common.list) {
5140 bool do_break = &tmp->common.list == &fib_node->entry_list;
5141
5142 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5143 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5144 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5145 if (do_break)
5146 break;
5147 }
5148}
5149
Ido Schimmel9aecce12017-02-09 10:28:42 +01005150static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5151 struct mlxsw_sp_fib_node *fib_node)
5152{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005153 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005154 case MLXSW_SP_L3_PROTO_IPV4:
5155 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5156 break;
5157 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005158 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005159 break;
5160 }
5161}
5162
Ido Schimmel76610eb2017-03-10 08:53:41 +01005163static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5164 struct mlxsw_sp_vr *vr,
5165 enum mlxsw_sp_l3proto proto)
5166{
5167 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5168 struct mlxsw_sp_fib_node *fib_node, *tmp;
5169
5170 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5171 bool do_break = &tmp->list == &fib->node_list;
5172
5173 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5174 if (do_break)
5175 break;
5176 }
5177}
5178
Ido Schimmelac571de2016-11-14 11:26:32 +01005179static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005180{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005181 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005182
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005183 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005184 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005185
Ido Schimmel76610eb2017-03-10 08:53:41 +01005186 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005187 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005188
5189 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005190 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005191
5192	/* If the virtual router was only used for IPv4, then it is no
5193	 * longer used.
5194 */
5195 if (!mlxsw_sp_vr_is_used(vr))
5196 continue;
5197 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005198 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005199}
5200
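/* Once programming any route fails, the driver gives up on offloading
 * altogether: everything is flushed, the abort trap is installed and
 * router->aborted stays set, so subsequent FIB events are ignored and
 * the kernel keeps routing traffic in software.
 */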
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005201static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005202{
5203 int err;
5204
Ido Schimmel9011b672017-05-16 19:38:25 +02005205 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005206 return;
5207 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005208 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005209 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005210 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5211 if (err)
5212 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5213}
5214
Ido Schimmel30572242016-12-03 16:45:01 +01005215struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005216 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005217 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005218 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005219 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005220 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005221 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005222 struct mfc_entry_notifier_info men_info;
5223 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005224 };
Ido Schimmel30572242016-12-03 16:45:01 +01005225 struct mlxsw_sp *mlxsw_sp;
5226 unsigned long event;
5227};
5228
Ido Schimmel66a57632017-08-03 13:28:26 +02005229static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005230{
Ido Schimmel30572242016-12-03 16:45:01 +01005231 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005232 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005233 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005234 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005235 int err;
5236
Ido Schimmel30572242016-12-03 16:45:01 +01005237 /* Protect internal structures from changes */
5238 rtnl_lock();
5239 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005240 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005241 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005242 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005243 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005244 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5245 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005246 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005247 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005248 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005249 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005250 break;
5251 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005252 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5253 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005254 break;
David Ahern1f279232017-10-27 17:37:14 -07005255 case FIB_EVENT_RULE_ADD:
5256	 /* If we get here, a rule was added that we do not support,
5257	 * so just do the fib_abort.
5258 */
5259 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005260 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005261 case FIB_EVENT_NH_ADD: /* fall through */
5262 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005263 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5264 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005265 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5266 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005267 }
Ido Schimmel30572242016-12-03 16:45:01 +01005268 rtnl_unlock();
5269 kfree(fib_work);
5270}
5271
Ido Schimmel66a57632017-08-03 13:28:26 +02005272static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5273{
Ido Schimmel583419f2017-08-03 13:28:27 +02005274 struct mlxsw_sp_fib_event_work *fib_work =
5275 container_of(work, struct mlxsw_sp_fib_event_work, work);
5276 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005277 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005278 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005279
5280 rtnl_lock();
5281 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005282 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005283 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005284 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005285 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005286 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005287 if (err)
5288 mlxsw_sp_router_fib_abort(mlxsw_sp);
5289 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5290 break;
5291 case FIB_EVENT_ENTRY_DEL:
5292 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5293 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5294 break;
David Ahern1f279232017-10-27 17:37:14 -07005295 case FIB_EVENT_RULE_ADD:
5296	 /* If we get here, a rule was added that we do not support,
5297	 * so just do the fib_abort.
5298 */
5299 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005300 break;
5301 }
5302 rtnl_unlock();
5303 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005304}
5305
Yotam Gigid42b0962017-09-27 08:23:20 +02005306static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5307{
5308 struct mlxsw_sp_fib_event_work *fib_work =
5309 container_of(work, struct mlxsw_sp_fib_event_work, work);
5310 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005311 bool replace;
5312 int err;
5313
5314 rtnl_lock();
5315 switch (fib_work->event) {
5316 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5317 case FIB_EVENT_ENTRY_ADD:
5318 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5319
5320 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5321 replace);
5322 if (err)
5323 mlxsw_sp_router_fib_abort(mlxsw_sp);
5324 ipmr_cache_put(fib_work->men_info.mfc);
5325 break;
5326 case FIB_EVENT_ENTRY_DEL:
5327 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5328 ipmr_cache_put(fib_work->men_info.mfc);
5329 break;
5330 case FIB_EVENT_VIF_ADD:
5331 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5332 &fib_work->ven_info);
5333 if (err)
5334 mlxsw_sp_router_fib_abort(mlxsw_sp);
5335 dev_put(fib_work->ven_info.dev);
5336 break;
5337 case FIB_EVENT_VIF_DEL:
5338 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5339 &fib_work->ven_info);
5340 dev_put(fib_work->ven_info.dev);
5341 break;
David Ahern1f279232017-10-27 17:37:14 -07005342 case FIB_EVENT_RULE_ADD:
5343	 /* If we get here, a rule was added that we do not support,
5344	 * so just do the fib_abort.
5345 */
5346 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005347 break;
5348 }
5349 rtnl_unlock();
5350 kfree(fib_work);
5351}
5352
Ido Schimmel66a57632017-08-03 13:28:26 +02005353static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5354 struct fib_notifier_info *info)
5355{
David Ahern3c75f9b2017-10-18 15:01:38 -07005356 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005357 struct fib_nh_notifier_info *fnh_info;
5358
Ido Schimmel66a57632017-08-03 13:28:26 +02005359 switch (fib_work->event) {
5360 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5361 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5362 case FIB_EVENT_ENTRY_ADD: /* fall through */
5363 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005364 fen_info = container_of(info, struct fib_entry_notifier_info,
5365 info);
5366 fib_work->fen_info = *fen_info;
5367 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005368 * freed while work is queued. Release it afterwards.
5369 */
5370 fib_info_hold(fib_work->fen_info.fi);
5371 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005372 case FIB_EVENT_NH_ADD: /* fall through */
5373 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005374 fnh_info = container_of(info, struct fib_nh_notifier_info,
5375 info);
5376 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005377 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5378 break;
5379 }
5380}
5381
5382static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5383 struct fib_notifier_info *info)
5384{
David Ahern3c75f9b2017-10-18 15:01:38 -07005385 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005386
Ido Schimmel583419f2017-08-03 13:28:27 +02005387 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005388 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005389 case FIB_EVENT_ENTRY_ADD: /* fall through */
5390 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005391 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5392 info);
5393 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005394 rt6_hold(fib_work->fen6_info.rt);
5395 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005396 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005397}
5398
Yotam Gigid42b0962017-09-27 08:23:20 +02005399static void
5400mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5401 struct fib_notifier_info *info)
5402{
5403 switch (fib_work->event) {
5404 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5405 case FIB_EVENT_ENTRY_ADD: /* fall through */
5406 case FIB_EVENT_ENTRY_DEL:
5407 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5408 ipmr_cache_hold(fib_work->men_info.mfc);
5409 break;
5410 case FIB_EVENT_VIF_ADD: /* fall through */
5411 case FIB_EVENT_VIF_DEL:
5412 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5413 dev_hold(fib_work->ven_info.dev);
5414 break;
David Ahern1f279232017-10-27 17:37:14 -07005415 }
5416}
5417
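/* Only default FIB rules and l3mdev (VRF) rules can be honoured by the
 * hardware lookup; adding any other rule would make the offloaded
 * routes incorrect, so it results in a FIB abort. Rule deletions are
 * currently ignored.
 */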
5418static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5419 struct fib_notifier_info *info,
5420 struct mlxsw_sp *mlxsw_sp)
5421{
5422 struct netlink_ext_ack *extack = info->extack;
5423 struct fib_rule_notifier_info *fr_info;
5424 struct fib_rule *rule;
5425 int err = 0;
5426
5427 /* nothing to do at the moment */
5428 if (event == FIB_EVENT_RULE_DEL)
5429 return 0;
5430
5431 if (mlxsw_sp->router->aborted)
5432 return 0;
5433
5434 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5435 rule = fr_info->rule;
5436
5437 switch (info->family) {
5438 case AF_INET:
5439 if (!fib4_rule_default(rule) && !rule->l3mdev)
5440 err = -1;
5441 break;
5442 case AF_INET6:
5443 if (!fib6_rule_default(rule) && !rule->l3mdev)
5444 err = -1;
5445 break;
5446 case RTNL_FAMILY_IPMR:
5447 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5448 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005449 break;
5450 }
David Ahern1f279232017-10-27 17:37:14 -07005451
5452 if (err < 0)
5453 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5454
5455 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005456}
5457
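/* The FIB notifier may run in an atomic context, so the handler below
 * only takes references on the objects carried in the notification and
 * defers the actual processing to a work item, which runs under RTNL
 * and is allowed to sleep.
 */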
Ido Schimmel30572242016-12-03 16:45:01 +01005458/* Called with rcu_read_lock() */
5459static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5460 unsigned long event, void *ptr)
5461{
Ido Schimmel30572242016-12-03 16:45:01 +01005462 struct mlxsw_sp_fib_event_work *fib_work;
5463 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005464 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005465 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005466
Ido Schimmel8e29f972017-09-15 15:31:07 +02005467 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005468 (info->family != AF_INET && info->family != AF_INET6 &&
5469 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005470 return NOTIFY_DONE;
5471
David Ahern1f279232017-10-27 17:37:14 -07005472 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5473
5474 switch (event) {
5475 case FIB_EVENT_RULE_ADD: /* fall through */
5476 case FIB_EVENT_RULE_DEL:
5477 err = mlxsw_sp_router_fib_rule_event(event, info,
5478 router->mlxsw_sp);
5479 if (!err)
5480 return NOTIFY_DONE;
5481 }
5482
Ido Schimmel30572242016-12-03 16:45:01 +01005483 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5484 if (WARN_ON(!fib_work))
5485 return NOTIFY_BAD;
5486
Ido Schimmel7e39d112017-05-16 19:38:28 +02005487 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005488 fib_work->event = event;
5489
Ido Schimmel66a57632017-08-03 13:28:26 +02005490 switch (info->family) {
5491 case AF_INET:
5492 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5493 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005494 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005495 case AF_INET6:
5496 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5497 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005498 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005499 case RTNL_FAMILY_IPMR:
5500 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5501 mlxsw_sp_router_fibmr_event(fib_work, info);
5502 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005503 }
5504
Ido Schimmela0e47612017-02-06 16:20:10 +01005505 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005506
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005507 return NOTIFY_DONE;
5508}
5509
Ido Schimmel4724ba562017-03-10 08:53:39 +01005510static struct mlxsw_sp_rif *
5511mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5512 const struct net_device *dev)
5513{
5514 int i;
5515
5516 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005517 if (mlxsw_sp->router->rifs[i] &&
5518 mlxsw_sp->router->rifs[i]->dev == dev)
5519 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005520
5521 return NULL;
5522}
5523
5524static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5525{
5526 char ritr_pl[MLXSW_REG_RITR_LEN];
5527 int err;
5528
5529 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5530 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5531 if (WARN_ON_ONCE(err))
5532 return err;
5533
5534 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5535 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5536}
5537
5538static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005539 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005540{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005541 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5542 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5543 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005544}
5545
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005546static bool
5547mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5548 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005549{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005550 struct inet6_dev *inet6_dev;
5551 bool addr_list_empty = true;
5552 struct in_device *idev;
5553
Ido Schimmel4724ba562017-03-10 08:53:39 +01005554 switch (event) {
5555 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005556 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005557 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005558 idev = __in_dev_get_rtnl(dev);
5559 if (idev && idev->ifa_list)
5560 addr_list_empty = false;
5561
5562 inet6_dev = __in6_dev_get(dev);
5563 if (addr_list_empty && inet6_dev &&
5564 !list_empty(&inet6_dev->addr_list))
5565 addr_list_empty = false;
5566
5567 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005568 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005569 return true;
5570 /* It is possible we already removed the RIF ourselves
5571 * if it was assigned to a netdev that is now a bridge
5572 * or LAG slave.
5573 */
5574 return false;
5575 }
5576
5577 return false;
5578}
5579
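/* mlxsw_sp_rif_should_config() above ties the RIF to the netdevice's IP
 * configuration: NETDEV_UP of the first address creates a RIF only if
 * none exists yet, and NETDEV_DOWN of the last address removes it only
 * once both the IPv4 ifa_list and the IPv6 addr_list are empty and the
 * netdevice is not an L3 master's slave -- RIFs of enslaved netdevices
 * are handled through the VRF join/leave path further below.
 */
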
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005580static enum mlxsw_sp_rif_type
5581mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5582 const struct net_device *dev)
5583{
5584 enum mlxsw_sp_fid_type type;
5585
Petr Machata6ddb7422017-09-02 23:49:19 +02005586 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5587 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5588
5589 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005590 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5591 type = MLXSW_SP_FID_TYPE_8021Q;
5592 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5593 type = MLXSW_SP_FID_TYPE_8021Q;
5594 else if (netif_is_bridge_master(dev))
5595 type = MLXSW_SP_FID_TYPE_8021D;
5596 else
5597 type = MLXSW_SP_FID_TYPE_RFID;
5598
5599 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5600}
5601
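/* The mapping implemented by mlxsw_sp_dev_rif_type() above, in order of
 * precedence:
 *
 *	IP-in-IP tunnel netdev			-> IPIP loopback RIF
 *	VLAN upper of a bridge			-> VLAN RIF (802.1Q FID)
 *	VLAN-aware bridge			-> VLAN RIF (802.1Q FID)
 *	VLAN-unaware bridge			-> FID RIF (802.1D FID)
 *	anything else (port, LAG, VLAN
 *	upper of a port or LAG)			-> sub-port RIF (rFID)
 */
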
Ido Schimmelde5ed992017-06-04 16:53:40 +02005602static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005603{
5604 int i;
5605
Ido Schimmelde5ed992017-06-04 16:53:40 +02005606 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5607 if (!mlxsw_sp->router->rifs[i]) {
5608 *p_rif_index = i;
5609 return 0;
5610 }
5611 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005612
Ido Schimmelde5ed992017-06-04 16:53:40 +02005613 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005614}
5615
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005616static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5617 u16 vr_id,
5618 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005619{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005620 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005621
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005622 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005623 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005624 return NULL;
5625
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005626 INIT_LIST_HEAD(&rif->nexthop_list);
5627 INIT_LIST_HEAD(&rif->neigh_list);
5628 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5629 rif->mtu = l3_dev->mtu;
5630 rif->vr_id = vr_id;
5631 rif->dev = l3_dev;
5632 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005633
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005634 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005635}
5636
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005637struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5638 u16 rif_index)
5639{
5640 return mlxsw_sp->router->rifs[rif_index];
5641}
5642
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005643u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5644{
5645 return rif->rif_index;
5646}
5647
Petr Machata92107cf2017-09-02 23:49:28 +02005648u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5649{
5650 return lb_rif->common.rif_index;
5651}
5652
5653u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5654{
5655 return lb_rif->ul_vr_id;
5656}
5657
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005658int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5659{
5660 return rif->dev->ifindex;
5661}
5662
Yotam Gigi91e4d592017-09-19 10:00:19 +02005663const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5664{
5665 return rif->dev;
5666}
5667
Ido Schimmel4724ba562017-03-10 08:53:39 +01005668static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005669mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005670 const struct mlxsw_sp_rif_params *params,
5671 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005672{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005673 u32 tb_id = l3mdev_fib_table(params->dev);
5674 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005675 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005676 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005677 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005678 struct mlxsw_sp_vr *vr;
5679 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005680 int err;
5681
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005682 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5683 ops = mlxsw_sp->router->rif_ops_arr[type];
5684
David Ahernf8fa9b42017-10-18 09:56:56 -07005685 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005686 if (IS_ERR(vr))
5687 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005688 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005689
Ido Schimmelde5ed992017-06-04 16:53:40 +02005690 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005691 if (err) {
5692 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005693 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005694 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005695
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005696 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005697 if (!rif) {
5698 err = -ENOMEM;
5699 goto err_rif_alloc;
5700 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005701 rif->mlxsw_sp = mlxsw_sp;
5702 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005703
Petr Machata010cadf2017-09-02 23:49:18 +02005704 if (ops->fid_get) {
5705 fid = ops->fid_get(rif);
5706 if (IS_ERR(fid)) {
5707 err = PTR_ERR(fid);
5708 goto err_fid_get;
5709 }
5710 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005711 }
5712
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005713 if (ops->setup)
5714 ops->setup(rif, params);
5715
5716 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005717 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005718 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005719
Yotam Gigid42b0962017-09-27 08:23:20 +02005720 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5721 if (err)
5722 goto err_mr_rif_add;
5723
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005724 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005725 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005726
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005727 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005728
Yotam Gigid42b0962017-09-27 08:23:20 +02005729err_mr_rif_add:
5730 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005731err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005732 if (fid)
5733 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005734err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005735 kfree(rif);
5736err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005737err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005738 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005739 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005740 return ERR_PTR(err);
5741}
5742
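/* RIF creation is driven by the per-type ops: resolve the RIF type and
 * its ops, take a reference on the virtual router, reserve a free RIF
 * index, allocate the type-specific structure, optionally bind a FID,
 * run ops->setup() and ops->configure(), register with the multicast
 * router and allocate counters; the error path unwinds in exactly the
 * reverse order.  A minimal caller looks like the bridge handler
 * further below:
 *
 *	struct mlxsw_sp_rif_params params = {
 *		.dev = l3_dev,
 *	};
 *
 *	rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
 *	if (IS_ERR(rif))
 *		return PTR_ERR(rif);
 */
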
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005743void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005744{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005745 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5746 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005747 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005748 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005749
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005750 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005751 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005752
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005753 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005754 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005755 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005756 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005757 if (fid)
5758 /* Loopback RIFs are not associated with a FID. */
5759 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005760 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005761 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005762 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005763}
5764
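/* mlxsw_sp_rif_destroy() above mirrors creation in reverse: sync away
 * the nexthops and neighbours that used the RIF, drop it from the rifs
 * array, free its counters, unregister it from the multicast router,
 * run ops->deconfigure(), release the FID reference (loopback RIFs have
 * none), free the structure and finally put the virtual router.
 */
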
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005765static void
5766mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5767 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5768{
5769 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5770
5771 params->vid = mlxsw_sp_port_vlan->vid;
5772 params->lag = mlxsw_sp_port->lagged;
5773 if (params->lag)
5774 params->lag_id = mlxsw_sp_port->lag_id;
5775 else
5776 params->system_port = mlxsw_sp_port->local_port;
5777}
5778
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005779static int
Ido Schimmela1107482017-05-26 08:37:39 +02005780mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005781 struct net_device *l3_dev,
5782 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005783{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005784 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005785 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005786 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005787 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005788 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005789 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005790
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005791 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005792 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005793 struct mlxsw_sp_rif_params params = {
5794 .dev = l3_dev,
5795 };
5796
5797 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005798 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005799 if (IS_ERR(rif))
5800 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005801 }
5802
Ido Schimmela1107482017-05-26 08:37:39 +02005803	/* The FID was already created; just take a reference. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005804 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005805 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5806 if (err)
5807 goto err_fid_port_vid_map;
5808
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005809 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005810 if (err)
5811 goto err_port_vid_learning_set;
5812
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005813 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005814 BR_STATE_FORWARDING);
5815 if (err)
5816 goto err_port_vid_stp_set;
5817
Ido Schimmela1107482017-05-26 08:37:39 +02005818 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005819
Ido Schimmel4724ba562017-03-10 08:53:39 +01005820 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005821
5822err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005823 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005824err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005825 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5826err_fid_port_vid_map:
5827 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005828 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005829}
5830
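/* Joining a {port, VID} pair to the router means binding it to the rFID
 * of the sub-port RIF that represents l3_dev: the RIF is looked up or
 * created, the pair is mapped to the RIF's FID, learning is disabled on
 * the VID and its STP state is forced to forwarding, since traffic on
 * this VID is now routed rather than bridged.
 */
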
Ido Schimmela1107482017-05-26 08:37:39 +02005831void
5832mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005833{
Ido Schimmelce95e152017-05-26 08:37:27 +02005834 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005835 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005836 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005837
Ido Schimmela1107482017-05-26 08:37:39 +02005838 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5839 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005840
Ido Schimmela1107482017-05-26 08:37:39 +02005841 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005842 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5843 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005844 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
 5845	/* If the router port holds the last reference on the rFID, then the
5846 * associated Sub-port RIF will be destroyed.
5847 */
5848 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005849}
5850
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005851static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5852 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005853 unsigned long event, u16 vid,
5854 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005855{
5856 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005857 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005858
Ido Schimmelce95e152017-05-26 08:37:27 +02005859 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005860 if (WARN_ON(!mlxsw_sp_port_vlan))
5861 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005862
5863 switch (event) {
5864 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005865 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005866 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005867 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005868 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005869 break;
5870 }
5871
5872 return 0;
5873}
5874
5875static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005876 unsigned long event,
5877 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005878{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005879 if (netif_is_bridge_port(port_dev) ||
5880 netif_is_lag_port(port_dev) ||
5881 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005882 return 0;
5883
David Ahernf8fa9b42017-10-18 09:56:56 -07005884 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5885 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005886}
5887
5888static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5889 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005890 unsigned long event, u16 vid,
5891 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005892{
5893 struct net_device *port_dev;
5894 struct list_head *iter;
5895 int err;
5896
5897 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5898 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005899 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5900 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005901 event, vid,
5902 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005903 if (err)
5904 return err;
5905 }
5906 }
5907
5908 return 0;
5909}
5910
5911static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005912 unsigned long event,
5913 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005914{
5915 if (netif_is_bridge_port(lag_dev))
5916 return 0;
5917
David Ahernf8fa9b42017-10-18 09:56:56 -07005918 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5919 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005920}
5921
Ido Schimmel4724ba562017-03-10 08:53:39 +01005922static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005923 unsigned long event,
5924 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005925{
5926 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005927 struct mlxsw_sp_rif_params params = {
5928 .dev = l3_dev,
5929 };
Ido Schimmela1107482017-05-26 08:37:39 +02005930 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005931
5932 switch (event) {
5933 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07005934 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005935 if (IS_ERR(rif))
5936 return PTR_ERR(rif);
5937 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005938 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005939 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005940 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005941 break;
5942 }
5943
5944 return 0;
5945}
5946
5947static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005948 unsigned long event,
5949 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005950{
5951 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005952 u16 vid = vlan_dev_vlan_id(vlan_dev);
5953
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03005954 if (netif_is_bridge_port(vlan_dev))
5955 return 0;
5956
Ido Schimmel4724ba562017-03-10 08:53:39 +01005957 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005958 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005959 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005960 else if (netif_is_lag_master(real_dev))
5961 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07005962 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02005963 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005964 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005965
5966 return 0;
5967}
5968
Ido Schimmelb1e45522017-04-30 19:47:14 +03005969static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005970 unsigned long event,
5971 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03005972{
5973 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005974 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005975 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005976 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005977 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005978 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005979 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005980 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005981 else
5982 return 0;
5983}
5984
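/* __mlxsw_sp_inetaddr_event() above is the common entry point for the
 * IPv4 and IPv6 address notifiers: it only dispatches on the kind of
 * netdevice -- physical port, LAG, bridge master or VLAN upper -- and
 * the per-kind handlers decide whether a RIF needs to be created or
 * destroyed.
 */
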
Ido Schimmel4724ba562017-03-10 08:53:39 +01005985int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
5986 unsigned long event, void *ptr)
5987{
5988 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
5989 struct net_device *dev = ifa->ifa_dev->dev;
5990 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005991 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005992 int err = 0;
5993
David Ahern89d5dd22017-10-18 09:56:55 -07005994 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
5995 if (event == NETDEV_UP)
5996 goto out;
5997
5998 mlxsw_sp = mlxsw_sp_lower_get(dev);
5999 if (!mlxsw_sp)
6000 goto out;
6001
6002 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6003 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6004 goto out;
6005
David Ahernf8fa9b42017-10-18 09:56:56 -07006006 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006007out:
6008 return notifier_from_errno(err);
6009}
6010
6011int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6012 unsigned long event, void *ptr)
6013{
6014 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6015 struct net_device *dev = ivi->ivi_dev->dev;
6016 struct mlxsw_sp *mlxsw_sp;
6017 struct mlxsw_sp_rif *rif;
6018 int err = 0;
6019
Ido Schimmel4724ba562017-03-10 08:53:39 +01006020 mlxsw_sp = mlxsw_sp_lower_get(dev);
6021 if (!mlxsw_sp)
6022 goto out;
6023
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006024 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006025 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006026 goto out;
6027
David Ahernf8fa9b42017-10-18 09:56:56 -07006028 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006029out:
6030 return notifier_from_errno(err);
6031}
6032
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006033struct mlxsw_sp_inet6addr_event_work {
6034 struct work_struct work;
6035 struct net_device *dev;
6036 unsigned long event;
6037};
6038
6039static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6040{
6041 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6042 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6043 struct net_device *dev = inet6addr_work->dev;
6044 unsigned long event = inet6addr_work->event;
6045 struct mlxsw_sp *mlxsw_sp;
6046 struct mlxsw_sp_rif *rif;
6047
6048 rtnl_lock();
6049 mlxsw_sp = mlxsw_sp_lower_get(dev);
6050 if (!mlxsw_sp)
6051 goto out;
6052
6053 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6054 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6055 goto out;
6056
David Ahernf8fa9b42017-10-18 09:56:56 -07006057 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006058out:
6059 rtnl_unlock();
6060 dev_put(dev);
6061 kfree(inet6addr_work);
6062}
6063
6064/* Called with rcu_read_lock() */
6065int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6066 unsigned long event, void *ptr)
6067{
6068 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6069 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6070 struct net_device *dev = if6->idev->dev;
6071
David Ahern89d5dd22017-10-18 09:56:55 -07006072 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6073 if (event == NETDEV_UP)
6074 return NOTIFY_DONE;
6075
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006076 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6077 return NOTIFY_DONE;
6078
6079 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6080 if (!inet6addr_work)
6081 return NOTIFY_BAD;
6082
6083 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6084 inet6addr_work->dev = dev;
6085 inet6addr_work->event = event;
6086 dev_hold(dev);
6087 mlxsw_core_schedule_work(&inet6addr_work->work);
6088
6089 return NOTIFY_DONE;
6090}
6091
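/* Unlike the IPv4 address notifier, which is invoked under RTNL, the
 * inet6addr chain is atomic, so mlxsw_sp_inet6addr_event() above only
 * records the event in a small work struct, takes a reference on the
 * netdevice with dev_hold() and lets mlxsw_sp_inet6addr_event_work()
 * perform the RIF update under rtnl_lock() in process context.
 */
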
David Ahern89d5dd22017-10-18 09:56:55 -07006092int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6093 unsigned long event, void *ptr)
6094{
6095 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6096 struct net_device *dev = i6vi->i6vi_dev->dev;
6097 struct mlxsw_sp *mlxsw_sp;
6098 struct mlxsw_sp_rif *rif;
6099 int err = 0;
6100
6101 mlxsw_sp = mlxsw_sp_lower_get(dev);
6102 if (!mlxsw_sp)
6103 goto out;
6104
6105 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6106 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6107 goto out;
6108
David Ahernf8fa9b42017-10-18 09:56:56 -07006109 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006110out:
6111 return notifier_from_errno(err);
6112}
6113
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006114static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006115 const char *mac, int mtu)
6116{
6117 char ritr_pl[MLXSW_REG_RITR_LEN];
6118 int err;
6119
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006120 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006121 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6122 if (err)
6123 return err;
6124
6125 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6126 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6127 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6128 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6129}
6130
6131int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6132{
6133 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006134 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006135 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006136 int err;
6137
6138 mlxsw_sp = mlxsw_sp_lower_get(dev);
6139 if (!mlxsw_sp)
6140 return 0;
6141
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006142 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6143 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006144 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006145 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006146
Ido Schimmela1107482017-05-26 08:37:39 +02006147 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006148 if (err)
6149 return err;
6150
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006151 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6152 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006153 if (err)
6154 goto err_rif_edit;
6155
Ido Schimmela1107482017-05-26 08:37:39 +02006156 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006157 if (err)
6158 goto err_rif_fdb_op;
6159
Yotam Gigifd890fe2017-09-27 08:23:21 +02006160 if (rif->mtu != dev->mtu) {
6161 struct mlxsw_sp_vr *vr;
6162
6163 /* The RIF is relevant only to its mr_table instance, as unlike
6164 * unicast routing, in multicast routing a RIF cannot be shared
6165 * between several multicast routing tables.
6166 */
6167 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6168 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6169 }
6170
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006171 ether_addr_copy(rif->addr, dev->dev_addr);
6172 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006173
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006174 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006175
6176 return 0;
6177
6178err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006179 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006180err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006181 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006182 return err;
6183}
6184
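/* A MAC address or MTU change on a netdevice that already has a RIF is
 * applied in three steps: remove the old unicast MAC from the FDB,
 * re-issue the RITR register with the new parameters, and install the
 * new MAC in the FDB, rolling back the earlier steps on failure.  The
 * MTU of the multicast routing table referencing the RIF is updated as
 * well, since a RIF belongs to exactly one mr_table.
 */
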
Ido Schimmelb1e45522017-04-30 19:47:14 +03006185static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006186 struct net_device *l3_dev,
6187 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006188{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006189 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006190
Ido Schimmelb1e45522017-04-30 19:47:14 +03006191 /* If netdev is already associated with a RIF, then we need to
6192 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006193 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006194 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6195 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006196 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006197
David Ahernf8fa9b42017-10-18 09:56:56 -07006198 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006199}
6200
Ido Schimmelb1e45522017-04-30 19:47:14 +03006201static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6202 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006203{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006204 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006205
Ido Schimmelb1e45522017-04-30 19:47:14 +03006206 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6207 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006208 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006209 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006210}
6211
Ido Schimmelb1e45522017-04-30 19:47:14 +03006212int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6213 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006214{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006215 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6216 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006217
Ido Schimmelb1e45522017-04-30 19:47:14 +03006218 if (!mlxsw_sp)
6219 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006220
Ido Schimmelb1e45522017-04-30 19:47:14 +03006221 switch (event) {
6222 case NETDEV_PRECHANGEUPPER:
6223 return 0;
6224 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006225 if (info->linking) {
6226 struct netlink_ext_ack *extack;
6227
6228 extack = netdev_notifier_info_to_extack(&info->info);
6229 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6230 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006231 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006232 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006233 break;
6234 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006235
Ido Schimmelb1e45522017-04-30 19:47:14 +03006236 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006237}
6238
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006239static struct mlxsw_sp_rif_subport *
6240mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006241{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006242 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006243}
6244
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006245static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6246 const struct mlxsw_sp_rif_params *params)
6247{
6248 struct mlxsw_sp_rif_subport *rif_subport;
6249
6250 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6251 rif_subport->vid = params->vid;
6252 rif_subport->lag = params->lag;
6253 if (params->lag)
6254 rif_subport->lag_id = params->lag_id;
6255 else
6256 rif_subport->system_port = params->system_port;
6257}
6258
6259static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6260{
6261 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6262 struct mlxsw_sp_rif_subport *rif_subport;
6263 char ritr_pl[MLXSW_REG_RITR_LEN];
6264
6265 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6266 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006267 rif->rif_index, rif->vr_id, rif->dev->mtu);
6268 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006269 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6270 rif_subport->lag ? rif_subport->lag_id :
6271 rif_subport->system_port,
6272 rif_subport->vid);
6273
6274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6275}
6276
6277static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6278{
Petr Machata010cadf2017-09-02 23:49:18 +02006279 int err;
6280
6281 err = mlxsw_sp_rif_subport_op(rif, true);
6282 if (err)
6283 return err;
6284
6285 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6286 mlxsw_sp_fid_index(rif->fid), true);
6287 if (err)
6288 goto err_rif_fdb_op;
6289
6290 mlxsw_sp_fid_rif_set(rif->fid, rif);
6291 return 0;
6292
6293err_rif_fdb_op:
6294 mlxsw_sp_rif_subport_op(rif, false);
6295 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006296}
6297
6298static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6299{
Petr Machata010cadf2017-09-02 23:49:18 +02006300 struct mlxsw_sp_fid *fid = rif->fid;
6301
6302 mlxsw_sp_fid_rif_set(fid, NULL);
6303 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6304 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006305 mlxsw_sp_rif_subport_op(rif, false);
6306}
6307
6308static struct mlxsw_sp_fid *
6309mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6310{
6311 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6312}
6313
6314static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6315 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6316 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6317 .setup = mlxsw_sp_rif_subport_setup,
6318 .configure = mlxsw_sp_rif_subport_configure,
6319 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6320 .fid_get = mlxsw_sp_rif_subport_fid_get,
6321};
6322
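/* Every RIF flavour is described by a struct mlxsw_sp_rif_ops like the
 * one above: rif_size gives the size of the type-specific container
 * embedding struct mlxsw_sp_rif, setup() copies the creation
 * parameters, configure() and deconfigure() program the device, and
 * fid_get() binds the flooding FID when one is needed.
 * mlxsw_sp_rif_create() and mlxsw_sp_rif_destroy() reach the
 * type-specific behaviour only through these ops, which keeps sub-port,
 * VLAN, FID and IP-in-IP loopback RIFs on a single code path.
 */
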
6323static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6324 enum mlxsw_reg_ritr_if_type type,
6325 u16 vid_fid, bool enable)
6326{
6327 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6328 char ritr_pl[MLXSW_REG_RITR_LEN];
6329
6330 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006331 rif->dev->mtu);
6332 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006333 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6334
6335 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6336}
6337
Yotam Gigib35750f2017-10-09 11:15:33 +02006338u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006339{
6340 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6341}
6342
6343static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6344{
6345 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6346 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6347 int err;
6348
6349 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6350 if (err)
6351 return err;
6352
Ido Schimmel0d284812017-07-18 10:10:12 +02006353 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6354 mlxsw_sp_router_port(mlxsw_sp), true);
6355 if (err)
6356 goto err_fid_mc_flood_set;
6357
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006358 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6359 mlxsw_sp_router_port(mlxsw_sp), true);
6360 if (err)
6361 goto err_fid_bc_flood_set;
6362
Petr Machata010cadf2017-09-02 23:49:18 +02006363 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6364 mlxsw_sp_fid_index(rif->fid), true);
6365 if (err)
6366 goto err_rif_fdb_op;
6367
6368 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006369 return 0;
6370
Petr Machata010cadf2017-09-02 23:49:18 +02006371err_rif_fdb_op:
6372 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6373 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006374err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006375 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6376 mlxsw_sp_router_port(mlxsw_sp), false);
6377err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006378 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6379 return err;
6380}
6381
6382static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6383{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006384 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006385 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6386 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006387
Petr Machata010cadf2017-09-02 23:49:18 +02006388 mlxsw_sp_fid_rif_set(fid, NULL);
6389 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6390 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006391 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6392 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006393 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6394 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006395 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6396}
6397
6398static struct mlxsw_sp_fid *
6399mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6400{
6401 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6402
6403 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6404}
6405
6406static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6407 .type = MLXSW_SP_RIF_TYPE_VLAN,
6408 .rif_size = sizeof(struct mlxsw_sp_rif),
6409 .configure = mlxsw_sp_rif_vlan_configure,
6410 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6411 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6412};
6413
6414static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6415{
6416 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6417 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6418 int err;
6419
6420 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6421 true);
6422 if (err)
6423 return err;
6424
Ido Schimmel0d284812017-07-18 10:10:12 +02006425 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6426 mlxsw_sp_router_port(mlxsw_sp), true);
6427 if (err)
6428 goto err_fid_mc_flood_set;
6429
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006430 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6431 mlxsw_sp_router_port(mlxsw_sp), true);
6432 if (err)
6433 goto err_fid_bc_flood_set;
6434
Petr Machata010cadf2017-09-02 23:49:18 +02006435 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6436 mlxsw_sp_fid_index(rif->fid), true);
6437 if (err)
6438 goto err_rif_fdb_op;
6439
6440 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006441 return 0;
6442
Petr Machata010cadf2017-09-02 23:49:18 +02006443err_rif_fdb_op:
6444 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6445 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006446err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006447 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6448 mlxsw_sp_router_port(mlxsw_sp), false);
6449err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006450 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6451 return err;
6452}
6453
6454static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6455{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006456 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006457 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6458 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006459
Petr Machata010cadf2017-09-02 23:49:18 +02006460 mlxsw_sp_fid_rif_set(fid, NULL);
6461 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6462 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006463 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6464 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006465 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6466 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006467 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6468}
6469
6470static struct mlxsw_sp_fid *
6471mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6472{
6473 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6474}
6475
6476static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6477 .type = MLXSW_SP_RIF_TYPE_FID,
6478 .rif_size = sizeof(struct mlxsw_sp_rif),
6479 .configure = mlxsw_sp_rif_fid_configure,
6480 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6481 .fid_get = mlxsw_sp_rif_fid_fid_get,
6482};
6483
Petr Machata6ddb7422017-09-02 23:49:19 +02006484static struct mlxsw_sp_rif_ipip_lb *
6485mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6486{
6487 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6488}
6489
6490static void
6491mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6492 const struct mlxsw_sp_rif_params *params)
6493{
6494 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6495 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6496
6497 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6498 common);
6499 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6500 rif_lb->lb_config = params_lb->lb_config;
6501}
6502
6503static int
6504mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6505 struct mlxsw_sp_vr *ul_vr, bool enable)
6506{
6507 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6508 struct mlxsw_sp_rif *rif = &lb_rif->common;
6509 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6510 char ritr_pl[MLXSW_REG_RITR_LEN];
6511 u32 saddr4;
6512
6513 switch (lb_cf.ul_protocol) {
6514 case MLXSW_SP_L3_PROTO_IPV4:
6515 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6516 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6517 rif->rif_index, rif->vr_id, rif->dev->mtu);
6518 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6519 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6520 ul_vr->id, saddr4, lb_cf.okey);
6521 break;
6522
6523 case MLXSW_SP_L3_PROTO_IPV6:
6524 return -EAFNOSUPPORT;
6525 }
6526
6527 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6528}
6529
6530static int
6531mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6532{
6533 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6534 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6535 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6536 struct mlxsw_sp_vr *ul_vr;
6537 int err;
6538
David Ahernf8fa9b42017-10-18 09:56:56 -07006539 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006540 if (IS_ERR(ul_vr))
6541 return PTR_ERR(ul_vr);
6542
6543 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6544 if (err)
6545 goto err_loopback_op;
6546
6547 lb_rif->ul_vr_id = ul_vr->id;
6548 ++ul_vr->rif_count;
6549 return 0;
6550
6551err_loopback_op:
6552 mlxsw_sp_vr_put(ul_vr);
6553 return err;
6554}
6555
6556static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6557{
6558 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6559 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6560 struct mlxsw_sp_vr *ul_vr;
6561
6562 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6563 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6564
6565 --ul_vr->rif_count;
6566 mlxsw_sp_vr_put(ul_vr);
6567}
6568
6569static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6570 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6571 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6572 .setup = mlxsw_sp_rif_ipip_lb_setup,
6573 .configure = mlxsw_sp_rif_ipip_lb_configure,
6574 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6575};
6576
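/* The loopback RIF used for IP-in-IP offload differs from the Ethernet
 * RIF types: configure() resolves the underlay virtual router from the
 * tunnel netdevice's bound table, packs a loopback RITR entry with the
 * IPv4 underlay source address and GRE key, and keeps a reference on
 * that VR until deconfigure().  An IPv6 underlay is rejected with
 * -EAFNOSUPPORT, and no FID is attached -- fid_get is deliberately not
 * set in the ops.
 */
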
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006577static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6578 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6579 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6580 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006581 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006582};
6583
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006584static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6585{
6586 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6587
6588 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6589 sizeof(struct mlxsw_sp_rif *),
6590 GFP_KERNEL);
6591 if (!mlxsw_sp->router->rifs)
6592 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006593
6594 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6595
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006596 return 0;
6597}
6598
6599static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6600{
6601 int i;
6602
6603 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6604 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6605
6606 kfree(mlxsw_sp->router->rifs);
6607}
6608
Petr Machatadcbda282017-10-20 09:16:16 +02006609static int
6610mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6611{
6612 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6613
6614 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6615 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6616}
6617
Petr Machata38ebc0f2017-09-02 23:49:17 +02006618static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6619{
6620 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006621 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006622 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006623}
6624
6625static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6626{
Petr Machata1012b9a2017-09-02 23:49:23 +02006627 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006628}
6629
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006630static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6631{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006632 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006633
6634 /* Flush pending FIB notifications and then flush the device's
6635 * table before requesting another dump. The FIB notification
6636 * block is unregistered, so no need to take RTNL.
6637 */
6638 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006639 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6640 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006641}
6642
Ido Schimmelaf658b62017-11-02 17:14:09 +01006643#ifdef CONFIG_IP_ROUTE_MULTIPATH
6644static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6645{
6646 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6647}
6648
6649static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6650{
6651 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6652}
6653
6654static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6655{
6656 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6657
6658 mlxsw_sp_mp_hash_header_set(recr2_pl,
6659 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6660 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6661 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6662 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6663 if (only_l3)
6664 return;
6665 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6666 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6667 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6668 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6669}
6670
6671static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6672{
6673 mlxsw_sp_mp_hash_header_set(recr2_pl,
6674 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6675 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6676 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6677 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6678 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6679 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6680}
6681
6682static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6683{
6684 char recr2_pl[MLXSW_REG_RECR2_LEN];
6685 u32 seed;
6686
6687 get_random_bytes(&seed, sizeof(seed));
6688 mlxsw_reg_recr2_pack(recr2_pl, seed);
6689 mlxsw_sp_mp4_hash_init(recr2_pl);
6690 mlxsw_sp_mp6_hash_init(recr2_pl);
6691
6692 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6693}
6694#else
6695static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6696{
6697 return 0;
6698}
6699#endif
6700
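/* The ECMP hash is seeded with a random value and then enabled per
 * header field.  For IPv4 the field set follows the
 * net.ipv4.fib_multipath_hash_policy sysctl: with the default L3 policy
 * only the source and destination addresses are hashed, otherwise the
 * protocol and TCP/UDP ports are mixed in as well.  IPv6 always hashes
 * addresses, flow label and next header.  Without
 * CONFIG_IP_ROUTE_MULTIPATH the whole setup is compiled out.
 * Illustrative example, not from this file: switching the host to L4
 * hashing with
 *
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=1
 *
 * before the driver initializes would make the port and protocol fields
 * part of the hardware hash as well.
 */
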
Ido Schimmel4724ba562017-03-10 08:53:39 +01006701static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6702{
6703 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6704 u64 max_rifs;
6705 int err;
6706
6707 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6708 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006709 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006710
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006711 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006712 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6713 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6714 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006715 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006716 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006717}
6718
6719static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6720{
6721 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006722
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006723 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006724 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006725}
6726
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006727int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6728{
Ido Schimmel9011b672017-05-16 19:38:25 +02006729 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006730 int err;
6731
Ido Schimmel9011b672017-05-16 19:38:25 +02006732 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6733 if (!router)
6734 return -ENOMEM;
6735 mlxsw_sp->router = router;
6736 router->mlxsw_sp = mlxsw_sp;
6737
6738 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006739 err = __mlxsw_sp_router_init(mlxsw_sp);
6740 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006741 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006742
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006743 err = mlxsw_sp_rifs_init(mlxsw_sp);
6744 if (err)
6745 goto err_rifs_init;
6746
Petr Machata38ebc0f2017-09-02 23:49:17 +02006747 err = mlxsw_sp_ipips_init(mlxsw_sp);
6748 if (err)
6749 goto err_ipips_init;
6750
Ido Schimmel9011b672017-05-16 19:38:25 +02006751 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006752 &mlxsw_sp_nexthop_ht_params);
6753 if (err)
6754 goto err_nexthop_ht_init;
6755
Ido Schimmel9011b672017-05-16 19:38:25 +02006756 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006757 &mlxsw_sp_nexthop_group_ht_params);
6758 if (err)
6759 goto err_nexthop_group_ht_init;
6760
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006761 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006762 err = mlxsw_sp_lpm_init(mlxsw_sp);
6763 if (err)
6764 goto err_lpm_init;
6765
Yotam Gigid42b0962017-09-27 08:23:20 +02006766 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6767 if (err)
6768 goto err_mr_init;
6769
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006770 err = mlxsw_sp_vrs_init(mlxsw_sp);
6771 if (err)
6772 goto err_vrs_init;
6773
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006774 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006775 if (err)
6776 goto err_neigh_init;
6777
Ido Schimmel48fac882017-11-02 17:14:06 +01006778 mlxsw_sp->router->netevent_nb.notifier_call =
6779 mlxsw_sp_router_netevent_event;
6780 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6781 if (err)
6782 goto err_register_netevent_notifier;
6783
Ido Schimmelaf658b62017-11-02 17:14:09 +01006784 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
6785 if (err)
6786 goto err_mp_hash_init;
6787
Ido Schimmel7e39d112017-05-16 19:38:28 +02006788 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6789 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006790 mlxsw_sp_router_fib_dump_flush);
6791 if (err)
6792 goto err_register_fib_notifier;
6793
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006794 return 0;
6795
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006796err_register_fib_notifier:
Ido Schimmelaf658b62017-11-02 17:14:09 +01006797err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01006798 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6799err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006800 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006801err_neigh_init:
6802 mlxsw_sp_vrs_fini(mlxsw_sp);
6803err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02006804 mlxsw_sp_mr_fini(mlxsw_sp);
6805err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01006806 mlxsw_sp_lpm_fini(mlxsw_sp);
6807err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006808 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006809err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006810 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006811err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02006812 mlxsw_sp_ipips_fini(mlxsw_sp);
6813err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006814 mlxsw_sp_rifs_fini(mlxsw_sp);
6815err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006816 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006817err_router_init:
6818 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006819 return err;
6820}
6821
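/* Initialization order matters: the RGCR write enables the router
 * globally, then RIFs, IP-in-IP tunnels, the nexthop hash tables, LPM
 * trees, the multicast router, virtual routers and the neighbour code
 * are brought up before the netevent and FIB notifiers are registered,
 * so the first event already finds a fully constructed router.  The
 * error labels unwind in reverse, and mlxsw_sp_router_fini() below
 * tears everything down in the same reverse order.
 */
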
6822void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6823{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006824 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01006825 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006826 mlxsw_sp_neigh_fini(mlxsw_sp);
6827 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02006828 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006829 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006830 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
6831 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006832 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006833 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006834 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006835 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006836}