/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

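/* Per-ASIC router state: the RIF and virtual router arrays, the rhashtables
 * used for neighbour and nexthop tracking, the LPM tree pool, the periodic
 * neighbour/nexthop update works and the list of offloaded IP-in-IP tunnels.
 */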
struct mlxsw_sp_router {
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_sp_rif **rifs;
        struct mlxsw_sp_vr *vrs;
        struct rhashtable neigh_ht;
        struct rhashtable nexthop_group_ht;
        struct rhashtable nexthop_ht;
        struct list_head nexthop_list;
        struct {
                struct mlxsw_sp_lpm_tree *trees;
                unsigned int tree_count;
        } lpm;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
        } neighs_update;
        struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
        struct list_head nexthop_neighs_list;
        struct list_head ipip_list;
        bool aborted;
        struct notifier_block fib_nb;
        struct notifier_block netevent_nb;
        const struct mlxsw_sp_rif_ops **rif_ops_arr;
        const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

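/* Router InterFace (RIF). Represents an L3 interface in the device, bound to
 * a kernel netdevice, a FID and a virtual router. Optional ingress and egress
 * counters can be attached to it.
 */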
struct mlxsw_sp_rif {
        struct list_head nexthop_list;
        struct list_head neigh_list;
        struct net_device *dev;
        struct mlxsw_sp_fid *fid;
        unsigned char addr[ETH_ALEN];
        int mtu;
        u16 rif_index;
        u16 vr_id;
        const struct mlxsw_sp_rif_ops *ops;
        struct mlxsw_sp *mlxsw_sp;

        unsigned int counter_ingress;
        bool counter_ingress_valid;
        unsigned int counter_egress;
        bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
        struct net_device *dev;
        union {
                u16 system_port;
                u16 lag_id;
        };
        u16 vid;
        bool lag;
};

struct mlxsw_sp_rif_subport {
        struct mlxsw_sp_rif common;
        union {
                u16 system_port;
                u16 lag_id;
        };
        u16 vid;
        bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
        struct mlxsw_sp_rif common;
        struct mlxsw_sp_rif_ipip_lb_config lb_config;
        u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
        struct mlxsw_sp_rif_params common;
        struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

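/* Per-RIF-type operations. Each RIF flavour (such as the subport and IPIP
 * loopback variants above) provides its own setup/configure/deconfigure
 * callbacks and a way to resolve the FID the RIF should be bound to.
 */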
struct mlxsw_sp_rif_ops {
        enum mlxsw_sp_rif_type type;
        size_t rif_size;

        void (*setup)(struct mlxsw_sp_rif *rif,
                      const struct mlxsw_sp_rif_params *params);
        int (*configure)(struct mlxsw_sp_rif *rif);
        void (*deconfigure)(struct mlxsw_sp_rif *rif);
        struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

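/* RIF counter helpers. Each direction (ingress/egress) has a counter index
 * allocated from the RIF counter sub-pool and a validity flag in struct
 * mlxsw_sp_rif; the RITR register binds the counter to the RIF and the RICNT
 * register reads and clears it.
 */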
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
                           enum mlxsw_sp_rif_counter_dir dir)
{
        switch (dir) {
        case MLXSW_SP_RIF_COUNTER_EGRESS:
                return &rif->counter_egress;
        case MLXSW_SP_RIF_COUNTER_INGRESS:
                return &rif->counter_ingress;
        }
        return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
                               enum mlxsw_sp_rif_counter_dir dir)
{
        switch (dir) {
        case MLXSW_SP_RIF_COUNTER_EGRESS:
                return rif->counter_egress_valid;
        case MLXSW_SP_RIF_COUNTER_INGRESS:
                return rif->counter_ingress_valid;
        }
        return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
                               enum mlxsw_sp_rif_counter_dir dir,
                               bool valid)
{
        switch (dir) {
        case MLXSW_SP_RIF_COUNTER_EGRESS:
                rif->counter_egress_valid = valid;
                break;
        case MLXSW_SP_RIF_COUNTER_INGRESS:
                rif->counter_ingress_valid = valid;
                break;
        }
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
                                     unsigned int counter_index, bool enable,
                                     enum mlxsw_sp_rif_counter_dir dir)
{
        char ritr_pl[MLXSW_REG_RITR_LEN];
        bool is_egress = false;
        int err;

        if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
                is_egress = true;
        mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
        if (err)
                return err;

        mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
                                    is_egress);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

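/* Read the current value of a RIF counter. Returns -EINVAL if no counter is
 * bound in the requested direction. Only the good-unicast packet count is
 * reported.
 */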
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_rif *rif,
                                   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
        char ricnt_pl[MLXSW_REG_RICNT_LEN];
        unsigned int *p_counter_index;
        bool valid;
        int err;

        valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
        if (!valid)
                return -EINVAL;

        p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
        if (!p_counter_index)
                return -EINVAL;
        mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
                             MLXSW_REG_RICNT_OPCODE_NOP);
        err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
        if (err)
                return err;
        *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
        return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
                                      unsigned int counter_index)
{
        char ricnt_pl[MLXSW_REG_RICNT_LEN];

        mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
                             MLXSW_REG_RICNT_OPCODE_CLEAR);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_rif *rif,
                               enum mlxsw_sp_rif_counter_dir dir)
{
        unsigned int *p_counter_index;
        int err;

        p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
        if (!p_counter_index)
                return -EINVAL;
        err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
                                     p_counter_index);
        if (err)
                return err;

        err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
        if (err)
                goto err_counter_clear;

        err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
                                        *p_counter_index, true, dir);
        if (err)
                goto err_counter_edit;
        mlxsw_sp_rif_counter_valid_set(rif, dir, true);
        return 0;

err_counter_edit:
err_counter_clear:
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
                              *p_counter_index);
        return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_rif *rif,
                               enum mlxsw_sp_rif_counter_dir dir)
{
        unsigned int *p_counter_index;

        if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
                return;

        p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
        if (WARN_ON(!p_counter_index))
                return;
        mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
                                  *p_counter_index, false, dir);
        mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
                              *p_counter_index);
        mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct devlink *devlink;

        devlink = priv_to_devlink(mlxsw_sp->core);
        if (!devlink_dpipe_table_counter_enabled(devlink,
                                                 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
                return;
        mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

        mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

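/* Bitmap of prefix lengths in use by a FIB / LPM tree: one bit per possible
 * prefix length, 0..128, hence MLXSW_SP_PREFIX_COUNT above.
 */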
struct mlxsw_sp_prefix_usage {
        DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
        for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
                         struct mlxsw_sp_prefix_usage *prefix_usage2)
{
        return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
        struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

        return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
                          struct mlxsw_sp_prefix_usage *prefix_usage2)
{
        memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
                          unsigned char prefix_len)
{
        set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
                            unsigned char prefix_len)
{
        clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
        unsigned char addr[sizeof(struct in6_addr)];
        unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
        MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
        MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
        MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

        /* This is a special case of local delivery, where a packet should be
         * decapsulated on reception. Note that there is no corresponding ENCAP,
         * because that's a type of next hop, not of FIB entry. (There can be
         * several next hops in a REMOTE entry, and some of them may be
         * encapsulating entries.)
         */
        MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
        struct list_head entry_list;
        struct list_head list;
        struct rhash_head ht_node;
        struct mlxsw_sp_fib *fib;
        struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
        struct mlxsw_sp_ipip_entry *ipip_entry;
        u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
        struct list_head list;
        struct mlxsw_sp_fib_node *fib_node;
        enum mlxsw_sp_fib_entry_type type;
        struct list_head nexthop_group_node;
        struct mlxsw_sp_nexthop_group *nh_group;
        struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
        struct mlxsw_sp_fib_entry common;
        u32 tb_id;
        u32 prio;
        u8 tos;
        u8 type;
};

struct mlxsw_sp_fib6_entry {
        struct mlxsw_sp_fib_entry common;
        struct list_head rt6_list;
        unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
        struct list_head list;
        struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
        u8 id; /* tree ID */
        unsigned int ref_count;
        enum mlxsw_sp_l3proto proto;
        struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
        struct rhashtable ht;
        struct list_head node_list;
        struct mlxsw_sp_vr *vr;
        struct mlxsw_sp_lpm_tree *lpm_tree;
        unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
        struct mlxsw_sp_prefix_usage prefix_usage;
        enum mlxsw_sp_l3proto proto;
};

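/* Virtual router (VR). Mirrors a kernel FIB table (RT_TABLE_*) and groups the
 * IPv4 and IPv6 FIBs and the IPv4 multicast routing table that share that
 * table ID.
 */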
struct mlxsw_sp_vr {
        u16 id; /* virtual router ID */
        u32 tb_id; /* kernel fib table id */
        unsigned int rif_count;
        struct mlxsw_sp_fib *fib4;
        struct mlxsw_sp_fib *fib6;
        struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
                                                enum mlxsw_sp_l3proto proto)
{
        struct mlxsw_sp_fib *fib;
        int err;

        fib = kzalloc(sizeof(*fib), GFP_KERNEL);
        if (!fib)
                return ERR_PTR(-ENOMEM);
        err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
        if (err)
                goto err_rhashtable_init;
        INIT_LIST_HEAD(&fib->node_list);
        fib->proto = proto;
        fib->vr = vr;
        return fib;

err_rhashtable_init:
        kfree(fib);
        return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
        WARN_ON(!list_empty(&fib->node_list));
        WARN_ON(fib->lpm_tree);
        rhashtable_destroy(&fib->ht);
        kfree(fib);
}

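/* LPM tree management. The device has a limited pool of LPM trees (tree 0 is
 * reserved); each tree describes which prefix lengths are looked up and in
 * what order, e.g. a FIB holding only /24 and /32 routes gets a tree with
 * bins for just those two lengths. Trees are reference counted and shared
 * between virtual routers whose FIBs use the same set of prefix lengths.
 */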
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_lpm_tree *lpm_tree;
        int i;

        for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
                lpm_tree = &mlxsw_sp->router->lpm.trees[i];
                if (lpm_tree->ref_count == 0)
                        return lpm_tree;
        }
        return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_lpm_tree *lpm_tree)
{
        char ralta_pl[MLXSW_REG_RALTA_LEN];

        mlxsw_reg_ralta_pack(ralta_pl, true,
                             (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
                             lpm_tree->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_lpm_tree *lpm_tree)
{
        char ralta_pl[MLXSW_REG_RALTA_LEN];

        mlxsw_reg_ralta_pack(ralta_pl, false,
                             (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
                             lpm_tree->id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_prefix_usage *prefix_usage,
                                  struct mlxsw_sp_lpm_tree *lpm_tree)
{
        char ralst_pl[MLXSW_REG_RALST_LEN];
        u8 root_bin = 0;
        u8 prefix;
        u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

        mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
                root_bin = prefix;

        mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
        mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
                if (prefix == 0)
                        continue;
                mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
                                         MLXSW_REG_RALST_BIN_NO_CHILD);
                last_prefix = prefix;
        }
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
                         struct mlxsw_sp_prefix_usage *prefix_usage,
                         enum mlxsw_sp_l3proto proto)
{
        struct mlxsw_sp_lpm_tree *lpm_tree;
        int err;

        lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
        if (!lpm_tree)
                return ERR_PTR(-EBUSY);
        lpm_tree->proto = proto;
        err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
        if (err)
                return ERR_PTR(err);

        err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
                                                lpm_tree);
        if (err)
                goto err_left_struct_set;
        memcpy(&lpm_tree->prefix_usage, prefix_usage,
               sizeof(lpm_tree->prefix_usage));
        return lpm_tree;

err_left_struct_set:
        mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
        return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_lpm_tree *lpm_tree)
{
        mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
                      struct mlxsw_sp_prefix_usage *prefix_usage,
                      enum mlxsw_sp_l3proto proto)
{
        struct mlxsw_sp_lpm_tree *lpm_tree;
        int i;

        for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
                lpm_tree = &mlxsw_sp->router->lpm.trees[i];
                if (lpm_tree->ref_count != 0 &&
                    lpm_tree->proto == proto &&
                    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
                                             prefix_usage))
                        return lpm_tree;
        }
        return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
        lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_lpm_tree *lpm_tree)
{
        if (--lpm_tree->ref_count == 0)
                mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_lpm_tree *lpm_tree;
        u64 max_trees;
        int i;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
                return -EIO;

        max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
        mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
        mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
                                              sizeof(struct mlxsw_sp_lpm_tree),
                                              GFP_KERNEL);
        if (!mlxsw_sp->router->lpm.trees)
                return -ENOMEM;

        for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
                lpm_tree = &mlxsw_sp->router->lpm.trees[i];
                lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
        }

        return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
        kfree(mlxsw_sp->router->lpm.trees);
}

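/* Virtual router management. A VR is considered used as long as any of its
 * tables exists; the kernel's main, default and local tables are squashed
 * into a single VR.
 */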
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
        return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_vr *vr;
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                vr = &mlxsw_sp->router->vrs[i];
                if (!mlxsw_sp_vr_is_used(vr))
                        return vr;
        }
        return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
                                     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
        char raltb_pl[MLXSW_REG_RALTB_LEN];

        mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
                             (enum mlxsw_reg_ralxx_protocol) fib->proto,
                             tree_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
                                       const struct mlxsw_sp_fib *fib)
{
        char raltb_pl[MLXSW_REG_RALTB_LEN];

        /* Bind to tree 0 which is default */
        mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
                             (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
        /* For our purpose, squash main, default and local tables into one */
        if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
                tb_id = RT_TABLE_MAIN;
        return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
                                            u32 tb_id)
{
        struct mlxsw_sp_vr *vr;
        int i;

        tb_id = mlxsw_sp_fix_tb_id(tb_id);

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                vr = &mlxsw_sp->router->vrs[i];
                if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
                        return vr;
        }
        return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
                                            enum mlxsw_sp_l3proto proto)
{
        switch (proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                return vr->fib4;
        case MLXSW_SP_L3_PROTO_IPV6:
                return vr->fib6;
        }
        return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
                                              u32 tb_id,
                                              struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_vr *vr;
        int err;

        vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
        if (!vr) {
                NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
                return ERR_PTR(-EBUSY);
        }
        vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr->fib4))
                return ERR_CAST(vr->fib4);
        vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
        if (IS_ERR(vr->fib6)) {
                err = PTR_ERR(vr->fib6);
                goto err_fib6_create;
        }
        vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
                                                 MLXSW_SP_L3_PROTO_IPV4);
        if (IS_ERR(vr->mr4_table)) {
                err = PTR_ERR(vr->mr4_table);
                goto err_mr_table_create;
        }
        vr->tb_id = tb_id;
        return vr;

err_mr_table_create:
        mlxsw_sp_fib_destroy(vr->fib6);
        vr->fib6 = NULL;
err_fib6_create:
        mlxsw_sp_fib_destroy(vr->fib4);
        vr->fib4 = NULL;
        return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
        mlxsw_sp_mr_table_destroy(vr->mr4_table);
        vr->mr4_table = NULL;
        mlxsw_sp_fib_destroy(vr->fib6);
        vr->fib6 = NULL;
        mlxsw_sp_fib_destroy(vr->fib4);
        vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
                                           struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_vr *vr;

        tb_id = mlxsw_sp_fix_tb_id(tb_id);
        vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
        if (!vr)
                vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
        return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
        if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
            list_empty(&vr->fib6->node_list) &&
            mlxsw_sp_mr_table_empty(vr->mr4_table))
                mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
                                    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
        struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

        if (!mlxsw_sp_vr_is_used(vr))
                return false;
        if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
                return true;
        return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_fib *fib,
                                        struct mlxsw_sp_lpm_tree *new_tree)
{
        struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
        int err;

        err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
        if (err)
                return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
        mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
        return 0;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_fib *fib,
                                         struct mlxsw_sp_lpm_tree *new_tree)
{
        struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
        enum mlxsw_sp_l3proto proto = fib->proto;
        u8 old_id, new_id = new_tree->id;
        struct mlxsw_sp_vr *vr;
        int i, err;

        if (!old_tree)
                goto no_replace;
        old_id = old_tree->id;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                vr = &mlxsw_sp->router->vrs[i];
                if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
                        continue;
                err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
                                                   mlxsw_sp_vr_fib(vr, proto),
                                                   new_tree);
                if (err)
                        goto err_tree_replace;
        }

        return 0;

err_tree_replace:
        for (i--; i >= 0; i--) {
                if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
                        continue;
                mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
                                             mlxsw_sp_vr_fib(vr, proto),
                                             old_tree);
        }
        return err;

no_replace:
        err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
        if (err)
                return err;
        fib->lpm_tree = new_tree;
        mlxsw_sp_lpm_tree_hold(new_tree);
        return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
                      enum mlxsw_sp_l3proto proto,
                      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
                struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
                struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
                unsigned char prefix;

                if (!mlxsw_sp_vr_is_used(vr))
                        continue;
                mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
                        mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
        }
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_vr *vr;
        u64 max_vrs;
        int i;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
                return -EIO;

        max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
        mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
                                        GFP_KERNEL);
        if (!mlxsw_sp->router->vrs)
                return -ENOMEM;

        for (i = 0; i < max_vrs; i++) {
                vr = &mlxsw_sp->router->vrs[i];
                vr->id = i;
        }

        return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
        /* At this stage we're guaranteed not to have new incoming
         * FIB notifications and the work queue is free from FIBs
         * sitting on top of mlxsw netdevs. However, we can still
         * have other FIBs queued. Flush the queue before flushing
         * the device's tables. No need for locks, as we're the only
         * writer.
         */
        mlxsw_core_flush_owq();
        mlxsw_sp_router_fib_flush(mlxsw_sp);
        kfree(mlxsw_sp->router->vrs);
}

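/* IP-in-IP offload. The tunnel netdevice is the overlay (ol) device; the
 * device that carries the encapsulated traffic, selected by the tunnel's
 * "link" parameter, is the underlay (ul) device. Decapsulation is resolved
 * against the underlay FIB table.
 */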
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
        struct ip_tunnel *tun = netdev_priv(ol_dev);
        struct net *net = dev_net(ol_dev);

        return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
        struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

        if (d)
                return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
        else
                return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
                    const struct mlxsw_sp_rif_params *params,
                    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
                                enum mlxsw_sp_ipip_type ipipt,
                                struct net_device *ol_dev,
                                struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_rif_params_ipip_lb lb_params;
        const struct mlxsw_sp_ipip_ops *ipip_ops;
        struct mlxsw_sp_rif *rif;

        ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
        lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
                .common.dev = ol_dev,
                .common.lag = false,
                .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
        };

        rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
        if (IS_ERR(rif))
                return ERR_CAST(rif);
        return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
                          enum mlxsw_sp_ipip_type ipipt,
                          struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;
        struct mlxsw_sp_ipip_entry *ret = NULL;

        ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
        if (!ipip_entry)
                return ERR_PTR(-ENOMEM);

        ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
                                                            ol_dev, NULL);
        if (IS_ERR(ipip_entry->ol_lb)) {
                ret = ERR_CAST(ipip_entry->ol_lb);
                goto err_ol_ipip_lb_create;
        }

        ipip_entry->ipipt = ipipt;
        ipip_entry->ol_dev = ol_dev;

        return ipip_entry;

err_ol_ipip_lb_create:
        kfree(ipip_entry);
        return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
        mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
        kfree(ipip_entry);
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
                               const union mlxsw_sp_l3addr *addr2)
{
        return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
                                  const enum mlxsw_sp_l3proto ul_proto,
                                  union mlxsw_sp_l3addr saddr,
                                  u32 ul_tb_id,
                                  struct mlxsw_sp_ipip_entry *ipip_entry)
{
        u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
        enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
        union mlxsw_sp_l3addr tun_saddr;

        if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
                return false;

        tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
        return tun_ul_tb_id == ul_tb_id &&
               mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

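/* Bind a local-delivery FIB entry to an IPIP entry as its decap route.
 * mlxsw_sp_kvdl_alloc() reserves a tunnel index that the decap FIB entry will
 * reference; the FIB entry and the IPIP entry are cross-linked so either side
 * can find the other.
 */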
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_fib_entry *fib_entry,
                              struct mlxsw_sp_ipip_entry *ipip_entry)
{
        u32 tunnel_index;
        int err;

        err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
        if (err)
                return err;

        ipip_entry->decap_fib_entry = fib_entry;
        fib_entry->decap.ipip_entry = ipip_entry;
        fib_entry->decap.tunnel_index = tunnel_index;
        return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_fib_entry *fib_entry)
{
        /* Unlink this node from the IPIP entry that it's the decap entry of. */
        fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
        fib_entry->decap.ipip_entry = NULL;
        mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
                         size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_ipip_entry *ipip_entry)
{
        struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

        mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
        fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

        mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_ipip_entry *ipip_entry,
                                  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
        if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
                                          ipip_entry))
                return;
        decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

        if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
                mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_ipip_entry *ipip_entry)
{
        struct mlxsw_sp_fib_node *fib_node;
        const struct mlxsw_sp_ipip_ops *ipip_ops;
        struct mlxsw_sp_fib_entry *fib_entry;
        unsigned char saddr_prefix_len;
        union mlxsw_sp_l3addr saddr;
        struct mlxsw_sp_fib *ul_fib;
        struct mlxsw_sp_vr *ul_vr;
        const void *saddrp;
        size_t saddr_len;
        u32 ul_tb_id;
        u32 saddr4;

        ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

        ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
        ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
        if (!ul_vr)
                return NULL;

        ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
        saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
                                           ipip_entry->ol_dev);

        switch (ipip_ops->ul_proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
                saddr4 = be32_to_cpu(saddr.addr4);
                saddrp = &saddr4;
                saddr_len = 4;
                saddr_prefix_len = 32;
                break;
        case MLXSW_SP_L3_PROTO_IPV6:
                WARN_ON(1);
                return NULL;
        }

        fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
                                            saddr_prefix_len);
        if (!fib_node || list_empty(&fib_node->entry_list))
                return NULL;

        fib_entry = list_first_entry(&fib_node->entry_list,
                                     struct mlxsw_sp_fib_entry, list);
        if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
                return NULL;

        return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
                           enum mlxsw_sp_ipip_type ipipt,
                           struct net_device *ol_dev)
{
        u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
        struct mlxsw_sp_router *router = mlxsw_sp->router;
        struct mlxsw_sp_ipip_entry *ipip_entry;
        enum mlxsw_sp_l3proto ul_proto;
        union mlxsw_sp_l3addr saddr;

        /* The configuration where several tunnels have the same local address
         * in the same underlay table needs special treatment in the HW. That is
         * currently not implemented in the driver.
         */
        list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
                            ipip_list_node) {
                ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
                saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
                if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
                                                      ul_tb_id, ipip_entry))
                        return ERR_PTR(-EEXIST);
        }

        ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
        if (IS_ERR(ipip_entry))
                return ipip_entry;

        list_add_tail(&ipip_entry->ipip_list_node,
                      &mlxsw_sp->router->ipip_list);

        return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_ipip_entry *ipip_entry)
{
        list_del(&ipip_entry->ipip_list_node);
        mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
                                  const struct net_device *ul_dev,
                                  enum mlxsw_sp_l3proto ul_proto,
                                  union mlxsw_sp_l3addr ul_dip,
                                  struct mlxsw_sp_ipip_entry *ipip_entry)
{
        u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
        enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
        struct net_device *ipip_ul_dev;

        if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
                return false;

        ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
        return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
                                                 ul_tb_id, ipip_entry) &&
               (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
                                  const struct net_device *ul_dev,
                                  enum mlxsw_sp_l3proto ul_proto,
                                  union mlxsw_sp_l3addr ul_dip)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;

        list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
                            ipip_list_node)
                if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
                                                      ul_proto, ul_dip,
                                                      ipip_entry))
                        return ipip_entry;

        return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
                                      const struct net_device *dev,
                                      enum mlxsw_sp_ipip_type *p_type)
{
        struct mlxsw_sp_router *router = mlxsw_sp->router;
        const struct mlxsw_sp_ipip_ops *ipip_ops;
        enum mlxsw_sp_ipip_type ipipt;

        for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
                ipip_ops = router->ipip_ops_arr[ipipt];
                if (dev->type == ipip_ops->dev_type) {
                        if (p_type)
                                *p_type = ipipt;
                        return true;
                }
        }
        return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
                                const struct net_device *dev)
{
        return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
                                   const struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;

        list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
                            ipip_list_node)
                if (ipip_entry->ol_dev == ol_dev)
                        return ipip_entry;

        return NULL;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
                                                const struct net_device *ol_dev,
                                                enum mlxsw_sp_ipip_type ipipt)
{
        const struct mlxsw_sp_ipip_ops *ops
                = mlxsw_sp->router->ipip_ops_arr[ipipt];

        /* For deciding whether decap should be offloaded, we don't care about
         * overlay protocol, so ask whether either one is supported.
         */
        return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
               ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
                                                struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;
        enum mlxsw_sp_ipip_type ipipt;

        mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
        if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
                ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
                                                        ol_dev);
                if (IS_ERR(ipip_entry))
                        return PTR_ERR(ipip_entry);
        }

        return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
                                                   struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;

        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
        if (ipip_entry)
                mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_ipip_entry *ipip_entry)
{
        struct mlxsw_sp_fib_entry *decap_fib_entry;

        decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
        if (decap_fib_entry)
                mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
                                                  decap_fib_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
                                                struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;

        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
        if (ipip_entry)
                mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_ipip_entry *ipip_entry)
{
        if (ipip_entry->decap_fib_entry)
                mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
                                                  struct net_device *ol_dev)
{
        struct mlxsw_sp_ipip_entry *ipip_entry;

        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
        if (ipip_entry)
                mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
                                                struct net_device *ol_dev,
                                                struct netlink_ext_ack *extack)
{
        struct mlxsw_sp_fib_entry *decap_fib_entry;
        struct mlxsw_sp_ipip_entry *ipip_entry;
        struct mlxsw_sp_rif_ipip_lb *lb_rif;

        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
        if (!ipip_entry)
                return 0;

        /* When a tunneling device is moved to a different VRF, we need to
         * update the backing loopback. Since RIFs can't be edited, we need to
         * destroy and recreate it. That might create a window of opportunity
         * where RALUE and RATR registers end up referencing a RIF that's
         * already gone. RATRs are handled by the RIF destroy, and to take care
         * of RALUE, demote the decap route back.
         */
        if (ipip_entry->decap_fib_entry)
                mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

        lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt,
                                                 ol_dev, extack);
        if (IS_ERR(lb_rif))
                return PTR_ERR(lb_rif);
        mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
        ipip_entry->ol_lb = lb_rif;

        if (ol_dev->flags & IFF_UP) {
                decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
                                                                 ipip_entry);
                if (decap_fib_entry)
                        mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
                                                          decap_fib_entry);
        }

        return 0;
}

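/* Entry point for netdevice events on IPIP overlay devices, dispatched from
 * the main mlxsw_sp netdevice notifier: register/unregister create and
 * destroy the IPIP entry, UP/DOWN promote or demote the decap route, and a
 * CHANGEUPPER that moves the tunnel under an l3mdev rebuilds the backing
 * loopback RIF.
 */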
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
                                     struct net_device *ol_dev,
                                     unsigned long event,
                                     struct netdev_notifier_info *info)
{
        struct netdev_notifier_changeupper_info *chup;
        struct netlink_ext_ack *extack;

        switch (event) {
        case NETDEV_REGISTER:
                return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
        case NETDEV_UNREGISTER:
                mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
                return 0;
        case NETDEV_UP:
                mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
                return 0;
        case NETDEV_DOWN:
                mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
                return 0;
        case NETDEV_CHANGEUPPER:
                chup = container_of(info, typeof(*chup), info);
                extack = info->extack;
                if (netif_is_l3_master(chup->upper_dev))
                        return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
                                                                    ol_dev,
                                                                    extack);
                return 0;
        }
        return 0;
}

Jiri Pirko6cf3c972016-07-05 11:27:39 +02001430struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001431 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001432};
1433
1434struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001435 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001436 struct rhash_head ht_node;
1437 struct mlxsw_sp_neigh_key key;
1438 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001439 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001440 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001441 struct list_head nexthop_list; /* list of nexthops using
1442 * this neigh entry
1443 */
Yotam Gigib2157142016-07-05 11:27:51 +02001444 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001445 unsigned int counter_index;
1446 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001447};
1448
1449static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1450 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1451 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1452 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1453};
1454
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001455struct mlxsw_sp_neigh_entry *
1456mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1457 struct mlxsw_sp_neigh_entry *neigh_entry)
1458{
1459 if (!neigh_entry) {
1460 if (list_empty(&rif->neigh_list))
1461 return NULL;
1462 else
1463 return list_first_entry(&rif->neigh_list,
1464 typeof(*neigh_entry),
1465 rif_list_node);
1466 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001467 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001468 return NULL;
1469 return list_next_entry(neigh_entry, rif_list_node);
1470}
1471
1472int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1473{
1474 return neigh_entry->key.n->tbl->family;
1475}
1476
1477unsigned char *
1478mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1479{
1480 return neigh_entry->ha;
1481}
1482
1483u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1484{
1485 struct neighbour *n;
1486
1487 n = neigh_entry->key.n;
1488 return ntohl(*((__be32 *) n->primary_key));
1489}
1490
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001491struct in6_addr *
1492mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1493{
1494 struct neighbour *n;
1495
1496 n = neigh_entry->key.n;
1497 return (struct in6_addr *) &n->primary_key;
1498}
1499
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001500int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1501 struct mlxsw_sp_neigh_entry *neigh_entry,
1502 u64 *p_counter)
1503{
1504 if (!neigh_entry->counter_valid)
1505 return -EINVAL;
1506
1507 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1508 p_counter, NULL);
1509}
1510
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001511static struct mlxsw_sp_neigh_entry *
1512mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1513 u16 rif)
1514{
1515 struct mlxsw_sp_neigh_entry *neigh_entry;
1516
1517 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1518 if (!neigh_entry)
1519 return NULL;
1520
1521 neigh_entry->key.n = n;
1522 neigh_entry->rif = rif;
1523 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1524
1525 return neigh_entry;
1526}
1527
1528static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1529{
1530 kfree(neigh_entry);
1531}
1532
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001533static int
1534mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1535 struct mlxsw_sp_neigh_entry *neigh_entry)
1536{
Ido Schimmel9011b672017-05-16 19:38:25 +02001537 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001538 &neigh_entry->ht_node,
1539 mlxsw_sp_neigh_ht_params);
1540}
1541
1542static void
1543mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1544 struct mlxsw_sp_neigh_entry *neigh_entry)
1545{
Ido Schimmel9011b672017-05-16 19:38:25 +02001546 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001547 &neigh_entry->ht_node,
1548 mlxsw_sp_neigh_ht_params);
1549}
1550
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001551static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001552mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1553 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001554{
1555 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001556 const char *table_name;
1557
1558 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1559 case AF_INET:
1560 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1561 break;
1562 case AF_INET6:
1563 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1564 break;
1565 default:
1566 WARN_ON(1);
1567 return false;
1568 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001569
1570 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001571 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001572}
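/* Neighbour counters are therefore only allocated when the corresponding
 * dpipe host table has counters enabled through devlink. A user-space
 * sketch, assuming the iproute2 devlink-dpipe syntax and the mlxsw_host4
 * table name exposed by spectrum_dpipe (exact syntax may differ between
 * iproute2 versions):
 *
 *   devlink dpipe table set pci/0000:03:00.0 name mlxsw_host4 \
 *           counters_enabled true
 *
 * Neighbour entries then get a flow counter allocated for them.
 */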
1573
1574static void
1575mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1576 struct mlxsw_sp_neigh_entry *neigh_entry)
1577{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001578 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001579 return;
1580
1581 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1582 return;
1583
1584 neigh_entry->counter_valid = true;
1585}
1586
1587static void
1588mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1589 struct mlxsw_sp_neigh_entry *neigh_entry)
1590{
1591 if (!neigh_entry->counter_valid)
1592 return;
1593 mlxsw_sp_flow_counter_free(mlxsw_sp,
1594 neigh_entry->counter_index);
1595 neigh_entry->counter_valid = false;
1596}
1597
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001598static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001599mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001600{
1601 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001602 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001603 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001604
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001605 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1606 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001607 return ERR_PTR(-EINVAL);
1608
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001609 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001610 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001611 return ERR_PTR(-ENOMEM);
1612
1613 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1614 if (err)
1615 goto err_neigh_entry_insert;
1616
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001617 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001618 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001619
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001620 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001621
1622err_neigh_entry_insert:
1623 mlxsw_sp_neigh_entry_free(neigh_entry);
1624 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001625}
1626
1627static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001628mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1629 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001630{
Ido Schimmel9665b742017-02-08 11:16:42 +01001631 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001632 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001633 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1634 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001635}
1636
1637static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001638mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001639{
Jiri Pirko33b13412016-11-10 12:31:04 +01001640 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001641
Jiri Pirko33b13412016-11-10 12:31:04 +01001642 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001643 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001644 &key, mlxsw_sp_neigh_ht_params);
1645}
1646
Yotam Gigic723c7352016-07-05 11:27:43 +02001647static void
1648mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1649{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001650 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001651
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001652#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001653 interval = min_t(unsigned long,
1654 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1655 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001656#else
1657 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1658#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001659 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001660}
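/* Worked example, assuming the kernel default DELAY_PROBE_TIME of 5 seconds
 * for both the ARP and (when IPv6 is enabled) ND tables: the min above is
 * 5 * HZ jiffies, so neighs_update.interval is initialized to 5000 ms. If
 * the sysctl is later changed, the NETEVENT_DELAY_PROBE_TIME_UPDATE handler
 * below refreshes this value.
 */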
1661
1662static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1663 char *rauhtd_pl,
1664 int ent_index)
1665{
1666 struct net_device *dev;
1667 struct neighbour *n;
1668 __be32 dipn;
1669 u32 dip;
1670 u16 rif;
1671
1672 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1673
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001674 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001675 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1676 return;
1677 }
1678
1679 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001680 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001681 n = neigh_lookup(&arp_tbl, &dipn, dev);
1682 if (!n) {
1683 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1684 &dip);
1685 return;
1686 }
1687
1688 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1689 neigh_event_send(n, NULL);
1690 neigh_release(n);
1691}
1692
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001693#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001694static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1695 char *rauhtd_pl,
1696 int rec_index)
1697{
1698 struct net_device *dev;
1699 struct neighbour *n;
1700 struct in6_addr dip;
1701 u16 rif;
1702
1703 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1704 (char *) &dip);
1705
1706 if (!mlxsw_sp->router->rifs[rif]) {
1707 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1708 return;
1709 }
1710
1711 dev = mlxsw_sp->router->rifs[rif]->dev;
1712 n = neigh_lookup(&nd_tbl, &dip, dev);
1713 if (!n) {
1714 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1715 &dip);
1716 return;
1717 }
1718
1719 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1720 neigh_event_send(n, NULL);
1721 neigh_release(n);
1722}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001723#else
1724static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1725 char *rauhtd_pl,
1726 int rec_index)
1727{
1728}
1729#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001730
Yotam Gigic723c7352016-07-05 11:27:43 +02001731static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1732 char *rauhtd_pl,
1733 int rec_index)
1734{
1735 u8 num_entries;
1736 int i;
1737
1738 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1739 rec_index);
1740 /* Hardware starts counting at 0, so add 1. */
1741 num_entries++;
1742
1743 /* Each record consists of several neighbour entries. */
1744 for (i = 0; i < num_entries; i++) {
1745 int ent_index;
1746
1747 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1748 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1749 ent_index);
1750 }
1751
1752}
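/* Index arithmetic sketch for the loop above, assuming
 * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC is 4: for rec_index 2 with a num_entries
 * field of 3 reported by the hardware (4 entries after the +1), the
 * neighbour entries processed are ent_index 8, 9, 10 and 11.
 */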
1753
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001754static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1755 char *rauhtd_pl,
1756 int rec_index)
1757{
1758 /* One record contains one entry. */
1759 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1760 rec_index);
1761}
1762
Yotam Gigic723c7352016-07-05 11:27:43 +02001763static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1764 char *rauhtd_pl, int rec_index)
1765{
1766 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1767 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1768 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1769 rec_index);
1770 break;
1771 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001772 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1773 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001774 break;
1775 }
1776}
1777
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001778static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1779{
1780 u8 num_rec, last_rec_index, num_entries;
1781
1782 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1783 last_rec_index = num_rec - 1;
1784
1785 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1786 return false;
1787 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1788 MLXSW_REG_RAUHTD_TYPE_IPV6)
1789 return true;
1790
1791 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1792 last_rec_index);
1793 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1794 return true;
1795 return false;
1796}
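/* Examples of the "full" check above, assuming MLXSW_REG_RAUHTD_REC_MAX_NUM
 * is 32: a dump with 20 records is never considered full; a dump with 32
 * records ending in an IPv6 record is full (IPv6 records carry a single
 * entry); a dump with 32 records ending in an IPv4 record is full only if
 * that record carries all MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries. A full
 * dump makes the caller below issue another RAUHTD query.
 */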
1797
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001798static int
1799__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1800 char *rauhtd_pl,
1801 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001802{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001803 int i, num_rec;
1804 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001805
1806 /* Make sure the neighbour's netdev isn't removed in the
1807 * process.
1808 */
1809 rtnl_lock();
1810 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001811 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001812 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1813 rauhtd_pl);
1814 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001815 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001816 break;
1817 }
1818 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1819 for (i = 0; i < num_rec; i++)
1820 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1821 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001822 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001823 rtnl_unlock();
1824
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001825 return err;
1826}
1827
1828static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1829{
1830 enum mlxsw_reg_rauhtd_type type;
1831 char *rauhtd_pl;
1832 int err;
1833
1834 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1835 if (!rauhtd_pl)
1836 return -ENOMEM;
1837
1838 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1839 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1840 if (err)
1841 goto out;
1842
1843 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1844 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1845out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001846 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001847 return err;
1848}
1849
1850static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1851{
1852 struct mlxsw_sp_neigh_entry *neigh_entry;
1853
	1854	/* Take the RTNL mutex here to prevent the lists from changing */
1855 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001856 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001857 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001858		/* If this neigh has nexthops, make the kernel think this neigh
1859 * is active regardless of the traffic.
1860 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001861 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001862 rtnl_unlock();
1863}
1864
1865static void
1866mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1867{
Ido Schimmel9011b672017-05-16 19:38:25 +02001868 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001869
Ido Schimmel9011b672017-05-16 19:38:25 +02001870 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001871 msecs_to_jiffies(interval));
1872}
1873
1874static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1875{
Ido Schimmel9011b672017-05-16 19:38:25 +02001876 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001877 int err;
1878
Ido Schimmel9011b672017-05-16 19:38:25 +02001879 router = container_of(work, struct mlxsw_sp_router,
1880 neighs_update.dw.work);
1881 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001882 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001883 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001884
Ido Schimmel9011b672017-05-16 19:38:25 +02001885 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001886
Ido Schimmel9011b672017-05-16 19:38:25 +02001887 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001888}
1889
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001890static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1891{
1892 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001893 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001894
Ido Schimmel9011b672017-05-16 19:38:25 +02001895 router = container_of(work, struct mlxsw_sp_router,
1896 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001897	/* Iterate over the nexthop neighbours, find those that are unresolved and
	1898	 * send ARP for them. This solves the chicken-and-egg problem where a
	1899	 * nexthop would not be offloaded until its neighbour is resolved, but the
	1900	 * neighbour would never be resolved as long as traffic flows in hardware
	1901	 * via a different nexthop.
	1902	 *
	1903	 * Take the RTNL mutex here to prevent the lists from changing.
1904 */
1905 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001906 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001907 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001908 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001909 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001910 rtnl_unlock();
1911
Ido Schimmel9011b672017-05-16 19:38:25 +02001912 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001913 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1914}
1915
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001916static void
1917mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1918 struct mlxsw_sp_neigh_entry *neigh_entry,
1919 bool removing);
1920
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001921static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001922{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001923 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1924 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1925}
1926
1927static void
1928mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1929 struct mlxsw_sp_neigh_entry *neigh_entry,
1930 enum mlxsw_reg_rauht_op op)
1931{
Jiri Pirko33b13412016-11-10 12:31:04 +01001932 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001933 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001934 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001935
1936 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1937 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001938 if (neigh_entry->counter_valid)
1939 mlxsw_reg_rauht_pack_counter(rauht_pl,
1940 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001941 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1942}
1943
1944static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001945mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1946 struct mlxsw_sp_neigh_entry *neigh_entry,
1947 enum mlxsw_reg_rauht_op op)
1948{
1949 struct neighbour *n = neigh_entry->key.n;
1950 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1951 const char *dip = n->primary_key;
1952
1953 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1954 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001955 if (neigh_entry->counter_valid)
1956 mlxsw_reg_rauht_pack_counter(rauht_pl,
1957 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001958 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1959}
1960
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001961bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001962{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001963 struct neighbour *n = neigh_entry->key.n;
1964
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001965 /* Packets with a link-local destination address are trapped
1966 * after LPM lookup and never reach the neighbour table, so
1967 * there is no need to program such neighbours to the device.
1968 */
1969 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1970 IPV6_ADDR_LINKLOCAL)
1971 return true;
1972 return false;
1973}
1974
1975static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001976mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1977 struct mlxsw_sp_neigh_entry *neigh_entry,
1978 bool adding)
1979{
1980 if (!adding && !neigh_entry->connected)
1981 return;
1982 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001983 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001984 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
1985 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001986 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001987 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001988 return;
1989 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
1990 mlxsw_sp_rauht_op(adding));
1991 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001992 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001993 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001994}
1995
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02001996void
1997mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
1998 struct mlxsw_sp_neigh_entry *neigh_entry,
1999 bool adding)
2000{
2001 if (adding)
2002 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2003 else
2004 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2005 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2006}
2007
Ido Schimmelceb88812017-11-02 17:14:07 +01002008struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002009 struct work_struct work;
2010 struct mlxsw_sp *mlxsw_sp;
2011 struct neighbour *n;
2012};
2013
2014static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2015{
Ido Schimmelceb88812017-11-02 17:14:07 +01002016 struct mlxsw_sp_netevent_work *net_work =
2017 container_of(work, struct mlxsw_sp_netevent_work, work);
2018 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002019 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002020 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002021 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002022 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002023 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002024
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002025 /* If these parameters are changed after we release the lock,
2026 * then we are guaranteed to receive another event letting us
2027 * know about it.
2028 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002029 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002030 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002031 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002032 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002033 read_unlock_bh(&n->lock);
2034
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002035 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002036 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002037 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2038 if (!entry_connected && !neigh_entry)
2039 goto out;
2040 if (!neigh_entry) {
2041 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2042 if (IS_ERR(neigh_entry))
2043 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002044 }
2045
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002046 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2047 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2048 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2049
2050 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2051 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2052
2053out:
2054 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002055 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002056 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002057}
2058
Ido Schimmel28678f02017-11-02 17:14:10 +01002059static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2060
2061static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2062{
2063 struct mlxsw_sp_netevent_work *net_work =
2064 container_of(work, struct mlxsw_sp_netevent_work, work);
2065 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2066
2067 mlxsw_sp_mp_hash_init(mlxsw_sp);
2068 kfree(net_work);
2069}
2070
2071static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002072 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002073{
Ido Schimmelceb88812017-11-02 17:14:07 +01002074 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002075 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002076 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002077 struct mlxsw_sp *mlxsw_sp;
2078 unsigned long interval;
2079 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002080 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002081 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002082
2083 switch (event) {
2084 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2085 p = ptr;
2086
2087 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002088 if (!p->dev || (p->tbl->family != AF_INET &&
2089 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002090 return NOTIFY_DONE;
2091
2092 /* We are in atomic context and can't take RTNL mutex,
2093 * so use RCU variant to walk the device chain.
2094 */
2095 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2096 if (!mlxsw_sp_port)
2097 return NOTIFY_DONE;
2098
2099 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2100 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002101 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002102
2103 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2104 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002105 case NETEVENT_NEIGH_UPDATE:
2106 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002107
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002108 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002109 return NOTIFY_DONE;
2110
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002111 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002112 if (!mlxsw_sp_port)
2113 return NOTIFY_DONE;
2114
Ido Schimmelceb88812017-11-02 17:14:07 +01002115 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2116 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002117 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002118 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002119 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002120
Ido Schimmelceb88812017-11-02 17:14:07 +01002121 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2122 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2123 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002124
	2125		/* Take a reference to ensure the neighbour won't be
	2126		 * destroyed until we drop the reference in the
	2127		 * work item.
2128 */
2129 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002130 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002131 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002132 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002133 case NETEVENT_MULTIPATH_HASH_UPDATE:
2134 net = ptr;
2135
2136 if (!net_eq(net, &init_net))
2137 return NOTIFY_DONE;
2138
2139 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2140 if (!net_work)
2141 return NOTIFY_BAD;
2142
2143 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2144 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2145 net_work->mlxsw_sp = router->mlxsw_sp;
2146 mlxsw_core_schedule_work(&net_work->work);
2147 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002148 }
2149
2150 return NOTIFY_DONE;
2151}
2152
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002153static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2154{
Yotam Gigic723c7352016-07-05 11:27:43 +02002155 int err;
2156
Ido Schimmel9011b672017-05-16 19:38:25 +02002157 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002158 &mlxsw_sp_neigh_ht_params);
2159 if (err)
2160 return err;
2161
2162 /* Initialize the polling interval according to the default
2163 * table.
2164 */
2165 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2166
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002167	/* Create the delayed works for neighbour activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002168 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002169 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002170 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002171 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002172 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2173 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002174 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002175}
2176
2177static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2178{
Ido Schimmel9011b672017-05-16 19:38:25 +02002179 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2180 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2181 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002182}
2183
Ido Schimmel9665b742017-02-08 11:16:42 +01002184static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002185 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002186{
2187 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2188
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002189 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002190 rif_list_node) {
2191 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002192 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002193 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002194}
2195
Petr Machata35225e42017-09-02 23:49:22 +02002196enum mlxsw_sp_nexthop_type {
2197 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002198 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002199};
2200
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002201struct mlxsw_sp_nexthop_key {
2202 struct fib_nh *fib_nh;
2203};
2204
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002205struct mlxsw_sp_nexthop {
2206 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002207 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002208 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002209 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2210 * this belongs to
2211 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002212 struct rhash_head ht_node;
2213 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002214 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002215 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002216 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002217 int norm_nh_weight;
2218 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002219 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002220	u8 should_offload:1, /* set indicates this neigh is connected and
	2221			      * should be put in the KVD linear area of this group.
	2222			      */
	2223	   offloaded:1, /* set when the neigh is actually put in the
	2224			 * KVD linear area of this group.
	2225			 */
	2226	   update:1; /* set indicates that the MAC of this neigh should
	2227		      * be updated in HW
2228 */
Petr Machata35225e42017-09-02 23:49:22 +02002229 enum mlxsw_sp_nexthop_type type;
2230 union {
2231 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002232 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002233 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002234 unsigned int counter_index;
2235 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002236};
2237
2238struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002239 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002240 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002241 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002242 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002243 u8 adj_index_valid:1,
2244 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002245 u32 adj_index;
2246 u16 ecmp_size;
2247 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002248 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002249 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002250#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002251};
2252
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002253void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2254 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002255{
2256 struct devlink *devlink;
2257
2258 devlink = priv_to_devlink(mlxsw_sp->core);
2259 if (!devlink_dpipe_table_counter_enabled(devlink,
2260 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2261 return;
2262
2263 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2264 return;
2265
2266 nh->counter_valid = true;
2267}
2268
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002269void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2270 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002271{
2272 if (!nh->counter_valid)
2273 return;
2274 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2275 nh->counter_valid = false;
2276}
2277
2278int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2279 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2280{
2281 if (!nh->counter_valid)
2282 return -EINVAL;
2283
2284 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2285 p_counter, NULL);
2286}
2287
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002288struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2289 struct mlxsw_sp_nexthop *nh)
2290{
2291 if (!nh) {
2292 if (list_empty(&router->nexthop_list))
2293 return NULL;
2294 else
2295 return list_first_entry(&router->nexthop_list,
2296 typeof(*nh), router_list_node);
2297 }
2298 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2299 return NULL;
2300 return list_next_entry(nh, router_list_node);
2301}
2302
2303bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2304{
2305 return nh->offloaded;
2306}
2307
2308unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2309{
2310 if (!nh->offloaded)
2311 return NULL;
2312 return nh->neigh_entry->ha;
2313}
2314
2315int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002316 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002317{
2318 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2319 u32 adj_hash_index = 0;
2320 int i;
2321
2322 if (!nh->offloaded || !nh_grp->adj_index_valid)
2323 return -EINVAL;
2324
2325 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002326 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002327
2328 for (i = 0; i < nh_grp->count; i++) {
2329 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2330
2331 if (nh_iter == nh)
2332 break;
2333 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002334 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002335 }
2336
2337 *p_adj_hash_index = adj_hash_index;
2338 return 0;
2339}
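/* Worked example for the computation above: in a group with adj_index 1000
 * and three offloaded nexthops occupying 2, 1 and 3 adjacency entries
 * respectively, querying the third nexthop yields *p_adj_index = 1000,
 * *p_adj_size = the group's ecmp_size, and *p_adj_hash_index = 3 (the 2 + 1
 * entries of the nexthops preceding it).
 */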
2340
2341struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2342{
2343 return nh->rif;
2344}
2345
2346bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2347{
2348 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2349 int i;
2350
2351 for (i = 0; i < nh_grp->count; i++) {
2352 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2353
2354 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2355 return true;
2356 }
2357 return false;
2358}
2359
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002360static struct fib_info *
2361mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2362{
2363 return nh_grp->priv;
2364}
2365
2366struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002367 enum mlxsw_sp_l3proto proto;
2368 union {
2369 struct fib_info *fi;
2370 struct mlxsw_sp_fib6_entry *fib6_entry;
2371 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002372};
2373
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002374static bool
2375mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2376 const struct in6_addr *gw, int ifindex)
2377{
2378 int i;
2379
2380 for (i = 0; i < nh_grp->count; i++) {
2381 const struct mlxsw_sp_nexthop *nh;
2382
2383 nh = &nh_grp->nexthops[i];
2384 if (nh->ifindex == ifindex &&
2385 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2386 return true;
2387 }
2388
2389 return false;
2390}
2391
2392static bool
2393mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2394 const struct mlxsw_sp_fib6_entry *fib6_entry)
2395{
2396 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2397
2398 if (nh_grp->count != fib6_entry->nrt6)
2399 return false;
2400
2401 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2402 struct in6_addr *gw;
2403 int ifindex;
2404
2405 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2406 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2407 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2408 return false;
2409 }
2410
2411 return true;
2412}
2413
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002414static int
2415mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2416{
2417 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2418 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2419
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002420 switch (cmp_arg->proto) {
2421 case MLXSW_SP_L3_PROTO_IPV4:
2422 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2423 case MLXSW_SP_L3_PROTO_IPV6:
2424 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2425 cmp_arg->fib6_entry);
2426 default:
2427 WARN_ON(1);
2428 return 1;
2429 }
2430}
2431
2432static int
2433mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2434{
2435 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002436}
2437
2438static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2439{
2440 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002441 const struct mlxsw_sp_nexthop *nh;
2442 struct fib_info *fi;
2443 unsigned int val;
2444 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002445
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002446 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2447 case AF_INET:
2448 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2449 return jhash(&fi, sizeof(fi), seed);
2450 case AF_INET6:
2451 val = nh_grp->count;
2452 for (i = 0; i < nh_grp->count; i++) {
2453 nh = &nh_grp->nexthops[i];
2454 val ^= nh->ifindex;
2455 }
2456 return jhash(&val, sizeof(val), seed);
2457 default:
2458 WARN_ON(1);
2459 return 0;
2460 }
2461}
2462
2463static u32
2464mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2465{
2466 unsigned int val = fib6_entry->nrt6;
2467 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2468 struct net_device *dev;
2469
2470 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2471 dev = mlxsw_sp_rt6->rt->dst.dev;
2472 val ^= dev->ifindex;
2473 }
2474
2475 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002476}
2477
2478static u32
2479mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2480{
2481 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2482
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002483 switch (cmp_arg->proto) {
2484 case MLXSW_SP_L3_PROTO_IPV4:
2485 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2486 case MLXSW_SP_L3_PROTO_IPV6:
2487 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2488 default:
2489 WARN_ON(1);
2490 return 0;
2491 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002492}
2493
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002494static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002495 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002496 .hashfn = mlxsw_sp_nexthop_group_hash,
2497 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2498 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002499};
2500
2501static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2502 struct mlxsw_sp_nexthop_group *nh_grp)
2503{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002504 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2505 !nh_grp->gateway)
2506 return 0;
2507
Ido Schimmel9011b672017-05-16 19:38:25 +02002508 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002509 &nh_grp->ht_node,
2510 mlxsw_sp_nexthop_group_ht_params);
2511}
2512
2513static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2514 struct mlxsw_sp_nexthop_group *nh_grp)
2515{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002516 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2517 !nh_grp->gateway)
2518 return;
2519
Ido Schimmel9011b672017-05-16 19:38:25 +02002520 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002521 &nh_grp->ht_node,
2522 mlxsw_sp_nexthop_group_ht_params);
2523}
2524
2525static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002526mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2527 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002528{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002529 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2530
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002531 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002532 cmp_arg.fi = fi;
2533 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2534 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002535 mlxsw_sp_nexthop_group_ht_params);
2536}
2537
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002538static struct mlxsw_sp_nexthop_group *
2539mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2540 struct mlxsw_sp_fib6_entry *fib6_entry)
2541{
2542 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2543
2544 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2545 cmp_arg.fib6_entry = fib6_entry;
2546 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2547 &cmp_arg,
2548 mlxsw_sp_nexthop_group_ht_params);
2549}
2550
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002551static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2552 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2553 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2554 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2555};
2556
2557static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2558 struct mlxsw_sp_nexthop *nh)
2559{
Ido Schimmel9011b672017-05-16 19:38:25 +02002560 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002561 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2562}
2563
2564static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2565 struct mlxsw_sp_nexthop *nh)
2566{
Ido Schimmel9011b672017-05-16 19:38:25 +02002567 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002568 mlxsw_sp_nexthop_ht_params);
2569}
2570
Ido Schimmelad178c82017-02-08 11:16:40 +01002571static struct mlxsw_sp_nexthop *
2572mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2573 struct mlxsw_sp_nexthop_key key)
2574{
Ido Schimmel9011b672017-05-16 19:38:25 +02002575 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002576 mlxsw_sp_nexthop_ht_params);
2577}
2578
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002579static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002580 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002581 u32 adj_index, u16 ecmp_size,
2582 u32 new_adj_index,
2583 u16 new_ecmp_size)
2584{
2585 char raleu_pl[MLXSW_REG_RALEU_LEN];
2586
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002587 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002588 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2589 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002590 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002591 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2592}
2593
2594static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2595 struct mlxsw_sp_nexthop_group *nh_grp,
2596 u32 old_adj_index, u16 old_ecmp_size)
2597{
2598 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002599 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002600 int err;
2601
2602 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002603 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002604 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002605 fib = fib_entry->fib_node->fib;
2606 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002607 old_adj_index,
2608 old_ecmp_size,
2609 nh_grp->adj_index,
2610 nh_grp->ecmp_size);
2611 if (err)
2612 return err;
2613 }
2614 return 0;
2615}
2616
Ido Schimmeleb789982017-10-22 23:11:48 +02002617static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2618 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002619{
2620 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2621 char ratr_pl[MLXSW_REG_RATR_LEN];
2622
2623 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002624 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2625 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002626 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002627 if (nh->counter_valid)
2628 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2629 else
2630 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2631
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002632 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2633}
2634
Ido Schimmeleb789982017-10-22 23:11:48 +02002635int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2636 struct mlxsw_sp_nexthop *nh)
2637{
2638 int i;
2639
2640 for (i = 0; i < nh->num_adj_entries; i++) {
2641 int err;
2642
2643 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2644 if (err)
2645 return err;
2646 }
2647
2648 return 0;
2649}
2650
2651static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2652 u32 adj_index,
2653 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002654{
2655 const struct mlxsw_sp_ipip_ops *ipip_ops;
2656
2657 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2658 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2659}
2660
Ido Schimmeleb789982017-10-22 23:11:48 +02002661static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2662 u32 adj_index,
2663 struct mlxsw_sp_nexthop *nh)
2664{
2665 int i;
2666
2667 for (i = 0; i < nh->num_adj_entries; i++) {
2668 int err;
2669
2670 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2671 nh);
2672 if (err)
2673 return err;
2674 }
2675
2676 return 0;
2677}
2678
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002679static int
Petr Machata35225e42017-09-02 23:49:22 +02002680mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2681 struct mlxsw_sp_nexthop_group *nh_grp,
2682 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002683{
2684 u32 adj_index = nh_grp->adj_index; /* base */
2685 struct mlxsw_sp_nexthop *nh;
2686 int i;
2687 int err;
2688
2689 for (i = 0; i < nh_grp->count; i++) {
2690 nh = &nh_grp->nexthops[i];
2691
2692 if (!nh->should_offload) {
2693 nh->offloaded = 0;
2694 continue;
2695 }
2696
Ido Schimmela59b7e02017-01-23 11:11:42 +01002697 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002698 switch (nh->type) {
2699 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002700 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002701 (mlxsw_sp, adj_index, nh);
2702 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002703 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2704 err = mlxsw_sp_nexthop_ipip_update
2705 (mlxsw_sp, adj_index, nh);
2706 break;
Petr Machata35225e42017-09-02 23:49:22 +02002707 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002708 if (err)
2709 return err;
2710 nh->update = 0;
2711 nh->offloaded = 1;
2712 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002713 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002714 }
2715 return 0;
2716}
2717
Ido Schimmel1819ae32017-07-21 18:04:28 +02002718static bool
2719mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2720 const struct mlxsw_sp_fib_entry *fib_entry);
2721
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002722static int
2723mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2724 struct mlxsw_sp_nexthop_group *nh_grp)
2725{
2726 struct mlxsw_sp_fib_entry *fib_entry;
2727 int err;
2728
2729 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002730 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2731 fib_entry))
2732 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002733 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2734 if (err)
2735 return err;
2736 }
2737 return 0;
2738}
2739
2740static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002741mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2742 enum mlxsw_reg_ralue_op op, int err);
2743
2744static void
2745mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2746{
2747 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2748 struct mlxsw_sp_fib_entry *fib_entry;
2749
2750 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2751 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2752 fib_entry))
2753 continue;
2754 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2755 }
2756}
2757
Ido Schimmel425a08c2017-10-22 23:11:47 +02002758static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2759{
2760 /* Valid sizes for an adjacency group are:
2761 * 1-64, 512, 1024, 2048 and 4096.
2762 */
2763 if (*p_adj_grp_size <= 64)
2764 return;
2765 else if (*p_adj_grp_size <= 512)
2766 *p_adj_grp_size = 512;
2767 else if (*p_adj_grp_size <= 1024)
2768 *p_adj_grp_size = 1024;
2769 else if (*p_adj_grp_size <= 2048)
2770 *p_adj_grp_size = 2048;
2771 else
2772 *p_adj_grp_size = 4096;
2773}
2774
2775static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2776 unsigned int alloc_size)
2777{
2778 if (alloc_size >= 4096)
2779 *p_adj_grp_size = 4096;
2780 else if (alloc_size >= 2048)
2781 *p_adj_grp_size = 2048;
2782 else if (alloc_size >= 1024)
2783 *p_adj_grp_size = 1024;
2784 else if (alloc_size >= 512)
2785 *p_adj_grp_size = 512;
2786}
2787
2788static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2789 u16 *p_adj_grp_size)
2790{
2791 unsigned int alloc_size;
2792 int err;
2793
2794 /* Round up the requested group size to the next size supported
2795 * by the device and make sure the request can be satisfied.
2796 */
2797 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2798 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2799 &alloc_size);
2800 if (err)
2801 return err;
2802 /* It is possible the allocation results in more allocated
	2803	 * entries than requested. Try to use as many of them as
2804 * possible.
2805 */
2806 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2807
2808 return 0;
2809}
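/* Worked example: a requested group size of 130 is rounded up to 512. If the
 * KVD linear allocator reports that, say, 1024 entries would actually be
 * allocated for that request, the size is then set to the largest supported
 * value not exceeding that allocation (1024 here), so the surplus entries
 * are still used for finer-grained weighting.
 */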
2810
Ido Schimmel77d964e2017-08-02 09:56:05 +02002811static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002812mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2813{
2814 int i, g = 0, sum_norm_weight = 0;
2815 struct mlxsw_sp_nexthop *nh;
2816
2817 for (i = 0; i < nh_grp->count; i++) {
2818 nh = &nh_grp->nexthops[i];
2819
2820 if (!nh->should_offload)
2821 continue;
2822 if (g > 0)
2823 g = gcd(nh->nh_weight, g);
2824 else
2825 g = nh->nh_weight;
2826 }
2827
2828 for (i = 0; i < nh_grp->count; i++) {
2829 nh = &nh_grp->nexthops[i];
2830
2831 if (!nh->should_offload)
2832 continue;
2833 nh->norm_nh_weight = nh->nh_weight / g;
2834 sum_norm_weight += nh->norm_nh_weight;
2835 }
2836
2837 nh_grp->sum_norm_weight = sum_norm_weight;
2838}
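/* Worked example for mlxsw_sp_nexthop_group_normalize() above (weights are
 * made up): offloadable nexthops with user weights 2, 4 and 6 yield
 * g = gcd(2, 4, 6) = 2, so the normalized weights become 1, 2 and 3 and
 * nh_grp->sum_norm_weight = 6. Nexthops that should not be offloaded are
 * skipped and contribute nothing to the sum.
 */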
2839
2840static void
2841mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2842{
2843 int total = nh_grp->sum_norm_weight;
2844 u16 ecmp_size = nh_grp->ecmp_size;
2845 int i, weight = 0, lower_bound = 0;
2846
2847 for (i = 0; i < nh_grp->count; i++) {
2848 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2849 int upper_bound;
2850
2851 if (!nh->should_offload)
2852 continue;
2853 weight += nh->norm_nh_weight;
2854 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2855 nh->num_adj_entries = upper_bound - lower_bound;
2856 lower_bound = upper_bound;
2857 }
2858}
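/* Worked example for mlxsw_sp_nexthop_group_rebalance() above (numbers are
 * illustrative): with normalized weights 1 and 2 (sum_norm_weight = 3) and
 * ecmp_size = 3, the cumulative upper bounds are
 * DIV_ROUND_CLOSEST(3 * 1, 3) = 1 and DIV_ROUND_CLOSEST(3 * 3, 3) = 3, so
 * the nexthops get 1 and 2 adjacency entries respectively. For a group
 * whose size was fixed up to 512, the same 1:2 ratio would give 171 and
 * 341 entries.
 */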
2859
2860static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002861mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2862 struct mlxsw_sp_nexthop_group *nh_grp)
2863{
Ido Schimmeleb789982017-10-22 23:11:48 +02002864 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002865 struct mlxsw_sp_nexthop *nh;
2866 bool offload_change = false;
2867 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002868 bool old_adj_index_valid;
2869 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002870 int i;
2871 int err;
2872
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002873 if (!nh_grp->gateway) {
2874 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2875 return;
2876 }
2877
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002878 for (i = 0; i < nh_grp->count; i++) {
2879 nh = &nh_grp->nexthops[i];
2880
Petr Machata56b8a9e2017-07-31 09:27:29 +02002881 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002882 offload_change = true;
2883 if (nh->should_offload)
2884 nh->update = 1;
2885 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002886 }
2887 if (!offload_change) {
2888 /* Nothing was added or removed, so no need to reallocate. Just
2889		 * update the MACs on the existing adjacency entries.
2890 */
Petr Machata35225e42017-09-02 23:49:22 +02002891 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002892 if (err) {
2893 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2894 goto set_trap;
2895 }
2896 return;
2897 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002898 mlxsw_sp_nexthop_group_normalize(nh_grp);
2899 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002900 /* No neigh of this group is connected so we just set
2901		 * the trap and let everything flow through the kernel.
2902 */
2903 goto set_trap;
2904
Ido Schimmeleb789982017-10-22 23:11:48 +02002905 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002906 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2907 if (err)
2908 /* No valid allocation size available. */
2909 goto set_trap;
2910
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002911 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2912 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002913 /* We ran out of KVD linear space, just set the
2914 * trap and let everything flow through kernel.
2915 */
2916 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2917 goto set_trap;
2918 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002919 old_adj_index_valid = nh_grp->adj_index_valid;
2920 old_adj_index = nh_grp->adj_index;
2921 old_ecmp_size = nh_grp->ecmp_size;
2922 nh_grp->adj_index_valid = 1;
2923 nh_grp->adj_index = adj_index;
2924 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002925 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002926 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002927 if (err) {
2928 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2929 goto set_trap;
2930 }
2931
2932 if (!old_adj_index_valid) {
2933 /* The trap was set for fib entries, so we have to call
2934		 * fib entry update to unset it and use the adjacency index.
2935 */
2936 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2937 if (err) {
2938 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2939 goto set_trap;
2940 }
2941 return;
2942 }
2943
2944 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2945 old_adj_index, old_ecmp_size);
2946 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2947 if (err) {
2948 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2949 goto set_trap;
2950 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002951
2952 /* Offload state within the group changed, so update the flags. */
2953 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2954
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002955 return;
2956
2957set_trap:
2958 old_adj_index_valid = nh_grp->adj_index_valid;
2959 nh_grp->adj_index_valid = 0;
2960 for (i = 0; i < nh_grp->count; i++) {
2961 nh = &nh_grp->nexthops[i];
2962 nh->offloaded = 0;
2963 }
2964 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2965 if (err)
2966 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2967 if (old_adj_index_valid)
2968 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2969}
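/* Refresh flow of mlxsw_sp_nexthop_group_refresh() above, paraphrased from
 * the code: when only the MACs of already-offloaded nexthops changed, the
 * existing adjacency entries are rewritten in place. Otherwise the group is
 * re-sized (normalize + mlxsw_sp_fix_adj_grp_size()), a new KVD linear area
 * is allocated and populated, the FIB entries are either switched from trap
 * to the new adjacency index or mass-updated away from the old one, and the
 * old area is freed. Any failure falls back to trapping traffic to the
 * kernel via the set_trap label.
 */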
2970
2971static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2972 bool removing)
2973{
Petr Machata213666a2017-07-31 09:27:30 +02002974 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002975 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002976 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002977 nh->should_offload = 0;
2978 nh->update = 1;
2979}
2980
2981static void
2982mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2983 struct mlxsw_sp_neigh_entry *neigh_entry,
2984 bool removing)
2985{
2986 struct mlxsw_sp_nexthop *nh;
2987
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002988 list_for_each_entry(nh, &neigh_entry->nexthop_list,
2989 neigh_list_node) {
2990 __mlxsw_sp_nexthop_neigh_update(nh, removing);
2991 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2992 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002993}
2994
Ido Schimmel9665b742017-02-08 11:16:42 +01002995static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002996 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002997{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002998 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002999 return;
3000
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003001 nh->rif = rif;
3002 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003003}
3004
3005static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3006{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003007 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003008 return;
3009
3010 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003011 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003012}
3013
Ido Schimmela8c97012017-02-08 11:16:35 +01003014static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3015 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003016{
3017 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003018 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003019 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003020 int err;
3021
Ido Schimmelad178c82017-02-08 11:16:40 +01003022 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003023 return 0;
3024
Jiri Pirko33b13412016-11-10 12:31:04 +01003025	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003026	 * not destroyed before the nexthop entry is done with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003027 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003028 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003029 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003030 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003031 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003032 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3033 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003034 if (IS_ERR(n))
3035 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003036 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003037 }
3038 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3039 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003040 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3041 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003042 err = -EINVAL;
3043 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003044 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003045 }
Yotam Gigib2157142016-07-05 11:27:51 +02003046
3047 /* If that is the first nexthop connected to that neigh, add to
3048 * nexthop_neighs_list
3049 */
3050 if (list_empty(&neigh_entry->nexthop_list))
3051 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003052 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003053
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003054 nh->neigh_entry = neigh_entry;
3055 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3056 read_lock_bh(&n->lock);
3057 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003058 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003059 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003060 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003061
3062 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003063
3064err_neigh_entry_create:
3065 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003066 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003067}
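/* Lifetime sketch for mlxsw_sp_nexthop_neigh_init() above (informal): the
 * neighbour reference taken via neigh_lookup()/neigh_create() is dropped
 * only on the error path here or later in mlxsw_sp_nexthop_neigh_fini().
 * The neigh entry is shared by all nexthops on neigh_entry->nexthop_list,
 * and the first user also links it onto router->nexthop_neighs_list so the
 * periodic neighbour update logic can find it.
 */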
3068
Ido Schimmela8c97012017-02-08 11:16:35 +01003069static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3070 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003071{
3072 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003073 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003074
Ido Schimmelb8399a12017-02-08 11:16:33 +01003075 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003076 return;
3077 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003078
Ido Schimmel58312122016-12-23 09:32:50 +01003079 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003080 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003081 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003082
3083 /* If that is the last nexthop connected to that neigh, remove from
3084 * nexthop_neighs_list
3085 */
Ido Schimmele58be792017-02-08 11:16:28 +01003086 if (list_empty(&neigh_entry->nexthop_list))
3087 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003088
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003089 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3090 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3091
3092 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003093}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003094
Petr Machata1012b9a2017-09-02 23:49:23 +02003095static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003096 struct mlxsw_sp_nexthop *nh,
3097 struct net_device *ol_dev)
3098{
3099 if (!nh->nh_grp->gateway || nh->ipip_entry)
3100 return 0;
3101
Petr Machata4cccb732017-10-16 16:26:39 +02003102 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3103 if (!nh->ipip_entry)
3104 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003105
3106 __mlxsw_sp_nexthop_neigh_update(nh, false);
3107 return 0;
3108}
3109
3110static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3111 struct mlxsw_sp_nexthop *nh)
3112{
3113 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3114
3115 if (!ipip_entry)
3116 return;
3117
3118 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003119 nh->ipip_entry = NULL;
3120}
3121
3122static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3123 const struct fib_nh *fib_nh,
3124 enum mlxsw_sp_ipip_type *p_ipipt)
3125{
3126 struct net_device *dev = fib_nh->nh_dev;
3127
3128 return dev &&
3129 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3130 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3131}
3132
Petr Machata35225e42017-09-02 23:49:22 +02003133static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3134 struct mlxsw_sp_nexthop *nh)
3135{
3136 switch (nh->type) {
3137 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3138 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3139 mlxsw_sp_nexthop_rif_fini(nh);
3140 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003141 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003142 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003143 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3144 break;
Petr Machata35225e42017-09-02 23:49:22 +02003145 }
3146}
3147
3148static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3149 struct mlxsw_sp_nexthop *nh,
3150 struct fib_nh *fib_nh)
3151{
Petr Machata1012b9a2017-09-02 23:49:23 +02003152 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003153 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003154 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003155 struct mlxsw_sp_rif *rif;
3156 int err;
3157
Petr Machata1012b9a2017-09-02 23:49:23 +02003158 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3159 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3160 MLXSW_SP_L3_PROTO_IPV4)) {
3161 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003162 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003163 if (err)
3164 return err;
3165 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3166 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003167 }
3168
Petr Machata35225e42017-09-02 23:49:22 +02003169 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3170 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3171 if (!rif)
3172 return 0;
3173
3174 mlxsw_sp_nexthop_rif_init(nh, rif);
3175 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3176 if (err)
3177 goto err_neigh_init;
3178
3179 return 0;
3180
3181err_neigh_init:
3182 mlxsw_sp_nexthop_rif_fini(nh);
3183 return err;
3184}
3185
3186static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3187 struct mlxsw_sp_nexthop *nh)
3188{
3189 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3190}
3191
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003192static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3193 struct mlxsw_sp_nexthop_group *nh_grp,
3194 struct mlxsw_sp_nexthop *nh,
3195 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003196{
3197 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003198 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003199 int err;
3200
3201 nh->nh_grp = nh_grp;
3202 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003203#ifdef CONFIG_IP_ROUTE_MULTIPATH
3204 nh->nh_weight = fib_nh->nh_weight;
3205#else
3206 nh->nh_weight = 1;
3207#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003208 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003209 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3210 if (err)
3211 return err;
3212
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003213 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003214 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3215
Ido Schimmel97989ee2017-03-10 08:53:38 +01003216 if (!dev)
3217 return 0;
3218
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003219 in_dev = __in_dev_get_rtnl(dev);
3220 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3221 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3222 return 0;
3223
Petr Machata35225e42017-09-02 23:49:22 +02003224 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003225 if (err)
3226 goto err_nexthop_neigh_init;
3227
3228 return 0;
3229
3230err_nexthop_neigh_init:
3231 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3232 return err;
3233}
3234
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003235static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3236 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003237{
Petr Machata35225e42017-09-02 23:49:22 +02003238 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003239 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003240 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003241 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003242}
3243
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003244static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3245 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003246{
3247 struct mlxsw_sp_nexthop_key key;
3248 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003249
Ido Schimmel9011b672017-05-16 19:38:25 +02003250 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003251 return;
3252
3253 key.fib_nh = fib_nh;
3254 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3255 if (WARN_ON_ONCE(!nh))
3256 return;
3257
Ido Schimmelad178c82017-02-08 11:16:40 +01003258 switch (event) {
3259 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003260 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003261 break;
3262 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003263 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003264 break;
3265 }
3266
3267 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3268}
3269
Ido Schimmel9665b742017-02-08 11:16:42 +01003270static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003271 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003272{
3273 struct mlxsw_sp_nexthop *nh, *tmp;
3274
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003275 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003276 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003277 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3278 }
3279}
3280
Petr Machata9b014512017-09-02 23:49:20 +02003281static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3282 const struct fib_info *fi)
3283{
Petr Machata1012b9a2017-09-02 23:49:23 +02003284 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3285 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003286}
3287
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003288static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003289mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003290{
3291 struct mlxsw_sp_nexthop_group *nh_grp;
3292 struct mlxsw_sp_nexthop *nh;
3293 struct fib_nh *fib_nh;
3294 size_t alloc_size;
3295 int i;
3296 int err;
3297
3298 alloc_size = sizeof(*nh_grp) +
3299 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3300 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3301 if (!nh_grp)
3302 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003303 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003304 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003305 nh_grp->neigh_tbl = &arp_tbl;
3306
Petr Machata9b014512017-09-02 23:49:20 +02003307 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003308 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003309 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003310 for (i = 0; i < nh_grp->count; i++) {
3311 nh = &nh_grp->nexthops[i];
3312 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003313 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003314 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003315 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003316 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003317 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3318 if (err)
3319 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003320 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3321 return nh_grp;
3322
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003323err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003324err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003325 for (i--; i >= 0; i--) {
3326 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003327 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003328 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003329 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003330 kfree(nh_grp);
3331 return ERR_PTR(err);
3332}
3333
3334static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003335mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3336 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003337{
3338 struct mlxsw_sp_nexthop *nh;
3339 int i;
3340
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003341 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003342 for (i = 0; i < nh_grp->count; i++) {
3343 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003344 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003345 }
Ido Schimmel58312122016-12-23 09:32:50 +01003346 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3347 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003348 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003349 kfree(nh_grp);
3350}
3351
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003352static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3353 struct mlxsw_sp_fib_entry *fib_entry,
3354 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003355{
3356 struct mlxsw_sp_nexthop_group *nh_grp;
3357
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003358 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003359 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003360 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003361 if (IS_ERR(nh_grp))
3362 return PTR_ERR(nh_grp);
3363 }
3364 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3365 fib_entry->nh_group = nh_grp;
3366 return 0;
3367}
3368
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003369static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3370 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003371{
3372 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3373
3374 list_del(&fib_entry->nexthop_group_node);
3375 if (!list_empty(&nh_grp->fib_list))
3376 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003377 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003378}
3379
Ido Schimmel013b20f2017-02-08 11:16:36 +01003380static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003381mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3382{
3383 struct mlxsw_sp_fib4_entry *fib4_entry;
3384
3385 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3386 common);
3387 return !fib4_entry->tos;
3388}
3389
3390static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003391mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3392{
3393 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3394
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003395 switch (fib_entry->fib_node->fib->proto) {
3396 case MLXSW_SP_L3_PROTO_IPV4:
3397 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3398 return false;
3399 break;
3400 case MLXSW_SP_L3_PROTO_IPV6:
3401 break;
3402 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003403
Ido Schimmel013b20f2017-02-08 11:16:36 +01003404 switch (fib_entry->type) {
3405 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3406 return !!nh_group->adj_index_valid;
3407 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003408 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003409 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3410 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003411 default:
3412 return false;
3413 }
3414}
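/* Offload decision implemented above: IPv4 entries with a non-zero TOS are
 * never offloaded (IPv6 has no such check); REMOTE entries need a valid
 * adjacency index, LOCAL entries need a RIF, IPIP_DECAP entries are always
 * offloadable, and any other type (e.g. TRAP) is reported as not offloaded.
 */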
3415
Ido Schimmel428b8512017-08-03 13:28:28 +02003416static struct mlxsw_sp_nexthop *
3417mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3418 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3419{
3420 int i;
3421
3422 for (i = 0; i < nh_grp->count; i++) {
3423 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3424 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3425
3426 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3427 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3428 &rt->rt6i_gateway))
3429 return nh;
3430 continue;
3431 }
3432
3433 return NULL;
3434}
3435
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003436static void
3437mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3438{
3439 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3440 int i;
3441
Petr Machata4607f6d2017-09-02 23:49:25 +02003442 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3443 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003444 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3445 return;
3446 }
3447
3448 for (i = 0; i < nh_grp->count; i++) {
3449 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3450
3451 if (nh->offloaded)
3452 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3453 else
3454 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3455 }
3456}
3457
3458static void
3459mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3460{
3461 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3462 int i;
3463
3464 for (i = 0; i < nh_grp->count; i++) {
3465 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3466
3467 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3468 }
3469}
3470
Ido Schimmel428b8512017-08-03 13:28:28 +02003471static void
3472mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3473{
3474 struct mlxsw_sp_fib6_entry *fib6_entry;
3475 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3476
3477 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3478 common);
3479
3480 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3481 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003482 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003483 return;
3484 }
3485
3486 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3487 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3488 struct mlxsw_sp_nexthop *nh;
3489
3490 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3491 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003492 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003493 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003494 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003495 }
3496}
3497
3498static void
3499mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3500{
3501 struct mlxsw_sp_fib6_entry *fib6_entry;
3502 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3503
3504 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3505 common);
3506 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3507 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3508
Ido Schimmelfe400792017-08-15 09:09:49 +02003509 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003510 }
3511}
3512
Ido Schimmel013b20f2017-02-08 11:16:36 +01003513static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3514{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003515 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003516 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003517 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003518 break;
3519 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003520 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3521 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003522 }
3523}
3524
3525static void
3526mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3527{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003528 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003529 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003530 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003531 break;
3532 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003533 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3534 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003535 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003536}
3537
3538static void
3539mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3540 enum mlxsw_reg_ralue_op op, int err)
3541{
3542 switch (op) {
3543 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003544 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3545 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3546 if (err)
3547 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003548 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003549 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003550 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003551 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3552 return;
3553 default:
3554 return;
3555 }
3556}
3557
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003558static void
3559mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3560 const struct mlxsw_sp_fib_entry *fib_entry,
3561 enum mlxsw_reg_ralue_op op)
3562{
3563 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3564 enum mlxsw_reg_ralxx_protocol proto;
3565 u32 *p_dip;
3566
3567 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3568
3569 switch (fib->proto) {
3570 case MLXSW_SP_L3_PROTO_IPV4:
3571 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3572 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3573 fib_entry->fib_node->key.prefix_len,
3574 *p_dip);
3575 break;
3576 case MLXSW_SP_L3_PROTO_IPV6:
3577 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3578 fib_entry->fib_node->key.prefix_len,
3579 fib_entry->fib_node->key.addr);
3580 break;
3581 }
3582}
3583
3584static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3585 struct mlxsw_sp_fib_entry *fib_entry,
3586 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003587{
3588 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003589 enum mlxsw_reg_ralue_trap_action trap_action;
3590 u16 trap_id = 0;
3591 u32 adjacency_index = 0;
3592 u16 ecmp_size = 0;
3593
3594 /* In case the nexthop group adjacency index is valid, use it
3595	 * with the provided ECMP size. Otherwise, set up a trap and pass
3596	 * traffic to the kernel.
3597 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003598 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003599 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3600 adjacency_index = fib_entry->nh_group->adj_index;
3601 ecmp_size = fib_entry->nh_group->ecmp_size;
3602 } else {
3603 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3604 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3605 }
3606
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003607 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003608 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3609 adjacency_index, ecmp_size);
3610 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3611}
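/* Illustration of the two paths in mlxsw_sp_fib_entry_op_remote() above
 * (register fields paraphrased): with a valid adjacency index the RALUE
 * entry is written with a "remote" action pointing at
 * {adjacency_index, ecmp_size}, letting the hardware spread traffic across
 * the group; otherwise the entry is written with a trap action and trap ID
 * RTR_INGRESS0, so matching packets are punted to the kernel.
 */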
3612
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003613static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3614 struct mlxsw_sp_fib_entry *fib_entry,
3615 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003616{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003617 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003618 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003619 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003620 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003621 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003622
3623 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3624 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003625 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003626 } else {
3627 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3628 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3629 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003630
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003631 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003632 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3633 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003634 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3635}
3636
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003637static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3638 struct mlxsw_sp_fib_entry *fib_entry,
3639 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003640{
3641 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003642
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003643 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003644 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3645 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3646}
3647
Petr Machata4607f6d2017-09-02 23:49:25 +02003648static int
3649mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3650 struct mlxsw_sp_fib_entry *fib_entry,
3651 enum mlxsw_reg_ralue_op op)
3652{
3653 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3654 const struct mlxsw_sp_ipip_ops *ipip_ops;
3655
3656 if (WARN_ON(!ipip_entry))
3657 return -EINVAL;
3658
3659 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3660 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3661 fib_entry->decap.tunnel_index);
3662}
3663
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003664static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3665 struct mlxsw_sp_fib_entry *fib_entry,
3666 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003667{
3668 switch (fib_entry->type) {
3669 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003670 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003671 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003672 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003673 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003674 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003675 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3676 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3677 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003678 }
3679 return -EINVAL;
3680}
3681
3682static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3683 struct mlxsw_sp_fib_entry *fib_entry,
3684 enum mlxsw_reg_ralue_op op)
3685{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003686 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003687
Ido Schimmel013b20f2017-02-08 11:16:36 +01003688 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003689
Ido Schimmel013b20f2017-02-08 11:16:36 +01003690 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003691}
3692
3693static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3694 struct mlxsw_sp_fib_entry *fib_entry)
3695{
Jiri Pirko7146da32016-09-01 10:37:41 +02003696 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3697 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003698}
3699
3700static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3701 struct mlxsw_sp_fib_entry *fib_entry)
3702{
3703 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3704 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3705}
3706
Jiri Pirko61c503f2016-07-04 08:23:11 +02003707static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003708mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3709 const struct fib_entry_notifier_info *fen_info,
3710 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003711{
Petr Machata4607f6d2017-09-02 23:49:25 +02003712 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3713 struct net_device *dev = fen_info->fi->fib_dev;
3714 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003715 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003716
Ido Schimmel97989ee2017-03-10 08:53:38 +01003717 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003718 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003719 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3720 MLXSW_SP_L3_PROTO_IPV4, dip);
3721 if (ipip_entry) {
3722 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3723 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3724 fib_entry,
3725 ipip_entry);
3726 }
3727 /* fall through */
3728 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003729 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3730 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003731 case RTN_UNREACHABLE: /* fall through */
3732 case RTN_BLACKHOLE: /* fall through */
3733 case RTN_PROHIBIT:
3734 /* Packets hitting these routes need to be trapped, but
3735 * can do so with a lower priority than packets directed
3736 * at the host, so use action type local instead of trap.
3737 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003738 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003739 return 0;
3740 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003741 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003742 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003743 else
3744 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003745 return 0;
3746 default:
3747 return -EINVAL;
3748 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003749}
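/* Route type mapping implemented above (RTN_* -> mlxsw entry type):
 *   RTN_LOCAL matching an IP-in-IP underlay address -> IPIP_DECAP
 *   other RTN_LOCAL and RTN_BROADCAST               -> TRAP
 *   RTN_UNREACHABLE / RTN_BLACKHOLE / RTN_PROHIBIT  -> LOCAL
 *   RTN_UNICAST with a gateway or IP-in-IP nexthop  -> REMOTE
 *   other RTN_UNICAST                               -> LOCAL
 *   anything else                                   -> -EINVAL
 */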
3750
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003751static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003752mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3753 struct mlxsw_sp_fib_node *fib_node,
3754 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003755{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003756 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003757 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003758 int err;
3759
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003760 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3761 if (!fib4_entry)
3762 return ERR_PTR(-ENOMEM);
3763 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003764
3765 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3766 if (err)
3767 goto err_fib4_entry_type_set;
3768
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003769 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003770 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003771 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003772
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003773 fib4_entry->prio = fen_info->fi->fib_priority;
3774 fib4_entry->tb_id = fen_info->tb_id;
3775 fib4_entry->type = fen_info->type;
3776 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003777
3778 fib_entry->fib_node = fib_node;
3779
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003780 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003781
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003782err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003783err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003784 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003785 return ERR_PTR(err);
3786}
3787
3788static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003789 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003790{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003791 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003792 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003793}
3794
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003795static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003796mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3797 const struct fib_entry_notifier_info *fen_info)
3798{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003799 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003800 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003801 struct mlxsw_sp_fib *fib;
3802 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003803
Ido Schimmel160e22a2017-07-18 10:10:20 +02003804 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3805 if (!vr)
3806 return NULL;
3807 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3808
3809 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3810 sizeof(fen_info->dst),
3811 fen_info->dst_len);
3812 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003813 return NULL;
3814
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003815 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3816 if (fib4_entry->tb_id == fen_info->tb_id &&
3817 fib4_entry->tos == fen_info->tos &&
3818 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003819 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3820 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003821 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003822 }
3823 }
3824
3825 return NULL;
3826}
3827
3828static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3829 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3830 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3831 .key_len = sizeof(struct mlxsw_sp_fib_key),
3832 .automatic_shrinking = true,
3833};
3834
3835static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3836 struct mlxsw_sp_fib_node *fib_node)
3837{
3838 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3839 mlxsw_sp_fib_ht_params);
3840}
3841
3842static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3843 struct mlxsw_sp_fib_node *fib_node)
3844{
3845 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3846 mlxsw_sp_fib_ht_params);
3847}
3848
3849static struct mlxsw_sp_fib_node *
3850mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3851 size_t addr_len, unsigned char prefix_len)
3852{
3853 struct mlxsw_sp_fib_key key;
3854
3855 memset(&key, 0, sizeof(key));
3856 memcpy(key.addr, addr, addr_len);
3857 key.prefix_len = prefix_len;
3858 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3859}
3860
3861static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003862mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003863 size_t addr_len, unsigned char prefix_len)
3864{
3865 struct mlxsw_sp_fib_node *fib_node;
3866
3867 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3868 if (!fib_node)
3869 return NULL;
3870
3871 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003872 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003873 memcpy(fib_node->key.addr, addr, addr_len);
3874 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003875
3876 return fib_node;
3877}
3878
3879static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3880{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003881 list_del(&fib_node->list);
3882 WARN_ON(!list_empty(&fib_node->entry_list));
3883 kfree(fib_node);
3884}
3885
3886static bool
3887mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3888 const struct mlxsw_sp_fib_entry *fib_entry)
3889{
3890 return list_first_entry(&fib_node->entry_list,
3891 struct mlxsw_sp_fib_entry, list) == fib_entry;
3892}
3893
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003894static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3895 struct mlxsw_sp_fib *fib,
3896 struct mlxsw_sp_fib_node *fib_node)
3897{
3898 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3899 struct mlxsw_sp_lpm_tree *lpm_tree;
3900 int err;
3901
3902 /* Since the tree is shared between all virtual routers we must
3903 * make sure it contains all the required prefix lengths. This
3904 * can be computed by either adding the new prefix length to the
3905 * existing prefix usage of a bound tree, or by aggregating the
3906 * prefix lengths across all virtual routers and adding the new
3907 * one as well.
3908 */
3909 if (fib->lpm_tree)
3910 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3911 &fib->lpm_tree->prefix_usage);
3912 else
3913 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3914 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3915
3916 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3917 fib->proto);
3918 if (IS_ERR(lpm_tree))
3919 return PTR_ERR(lpm_tree);
3920
3921 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3922 return 0;
3923
3924 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3925 if (err)
3926 return err;
3927
3928 return 0;
3929}
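/* Illustrative example for mlxsw_sp_fib_lpm_tree_link() above (prefix
 * lengths invented): if the bound tree currently covers prefix lengths
 * {0, 24} and a /30 node is added, the requested usage becomes {0, 24, 30}.
 * mlxsw_sp_lpm_tree_get() either hands back the already-bound tree, in
 * which case nothing more is needed, or a different tree that also covers
 * /30, in which case every virtual router bound to the old tree is moved
 * over by mlxsw_sp_vrs_lpm_tree_replace().
 */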
3930
3931static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3932 struct mlxsw_sp_fib *fib)
3933{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003934 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3935 return;
3936 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3937 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3938 fib->lpm_tree = NULL;
3939}
3940
Ido Schimmel9aecce12017-02-09 10:28:42 +01003941static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3942{
3943 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003944 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003945
3946 if (fib->prefix_ref_count[prefix_len]++ == 0)
3947 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3948}
3949
3950static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3951{
3952 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003953 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003954
3955 if (--fib->prefix_ref_count[prefix_len] == 0)
3956 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3957}
3958
Ido Schimmel76610eb2017-03-10 08:53:41 +01003959static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3960 struct mlxsw_sp_fib_node *fib_node,
3961 struct mlxsw_sp_fib *fib)
3962{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003963 int err;
3964
3965 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3966 if (err)
3967 return err;
3968 fib_node->fib = fib;
3969
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003970 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3971 if (err)
3972 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003973
3974 mlxsw_sp_fib_node_prefix_inc(fib_node);
3975
3976 return 0;
3977
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003978err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01003979 fib_node->fib = NULL;
3980 mlxsw_sp_fib_node_remove(fib, fib_node);
3981 return err;
3982}
3983
3984static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
3985 struct mlxsw_sp_fib_node *fib_node)
3986{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003987 struct mlxsw_sp_fib *fib = fib_node->fib;
3988
3989 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003990 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003991 fib_node->fib = NULL;
3992 mlxsw_sp_fib_node_remove(fib, fib_node);
3993}
3994
Ido Schimmel9aecce12017-02-09 10:28:42 +01003995static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003996mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
3997 size_t addr_len, unsigned char prefix_len,
3998 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003999{
4000 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004001 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004002 struct mlxsw_sp_vr *vr;
4003 int err;
4004
David Ahernf8fa9b42017-10-18 09:56:56 -07004005 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004006 if (IS_ERR(vr))
4007 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004008 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004009
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004010 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004011 if (fib_node)
4012 return fib_node;
4013
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004014 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004015 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004016 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004017 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004018 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004019
Ido Schimmel76610eb2017-03-10 08:53:41 +01004020 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4021 if (err)
4022 goto err_fib_node_init;
4023
Ido Schimmel9aecce12017-02-09 10:28:42 +01004024 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004025
Ido Schimmel76610eb2017-03-10 08:53:41 +01004026err_fib_node_init:
4027 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004028err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004029 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004030 return ERR_PTR(err);
4031}
4032
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004033static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4034 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004035{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004036 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004037
Ido Schimmel9aecce12017-02-09 10:28:42 +01004038 if (!list_empty(&fib_node->entry_list))
4039 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004040 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004041 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004042 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004043}
4044
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004045static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004046mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004047 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004048{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004049 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004050
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004051 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4052 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004053 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004054 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004055 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004056 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004057 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004058 if (fib4_entry->prio >= new4_entry->prio ||
4059 fib4_entry->tos < new4_entry->tos)
4060 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004061 }
4062
4063 return NULL;
4064}
4065
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004066static int
4067mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4068 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004069{
4070 struct mlxsw_sp_fib_node *fib_node;
4071
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004072 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004073 return -EINVAL;
4074
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004075 fib_node = fib4_entry->common.fib_node;
4076 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4077 common.list) {
4078 if (fib4_entry->tb_id != new4_entry->tb_id ||
4079 fib4_entry->tos != new4_entry->tos ||
4080 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004081 break;
4082 }
4083
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004084 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004085 return 0;
4086}
4087
Ido Schimmel9aecce12017-02-09 10:28:42 +01004088static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004089mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004090 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004091{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004092 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004093 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004094
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004095 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004096
Ido Schimmel4283bce2017-02-09 10:28:43 +01004097 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004098 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4099 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004100 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004101
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004102	/* Insert the new entry before the replaced one, so that the
4103	 * replaced entry can later be found and removed.

4104 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004105 if (fib4_entry) {
4106 list_add_tail(&new4_entry->common.list,
4107 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004108 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004109 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004110
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004111 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4112 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004113 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004114 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004115 }
4116
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004117 if (fib4_entry)
4118 list_add(&new4_entry->common.list,
4119 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004120 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004121 list_add(&new4_entry->common.list,
4122 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004123 }
4124
4125 return 0;
4126}
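/* The insert above intentionally places a replacement entry immediately
 * before the entry it replaces; mlxsw_sp_fib4_entry_replace() later uses
 * list_next_entry() to locate and remove the replaced entry.
 */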
4127
4128static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004129mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004130{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004131 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004132}
4133
Ido Schimmel80c238f2017-07-18 10:10:29 +02004134static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4135 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004136{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004137 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4138
Ido Schimmel9aecce12017-02-09 10:28:42 +01004139 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4140 return 0;
4141
4142 /* To prevent packet loss, overwrite the previously offloaded
4143 * entry.
4144 */
4145 if (!list_is_singular(&fib_node->entry_list)) {
4146 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4147 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4148
4149 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4150 }
4151
4152 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4153}
4154
Ido Schimmel80c238f2017-07-18 10:10:29 +02004155static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4156 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004157{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004158 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4159
Ido Schimmel9aecce12017-02-09 10:28:42 +01004160 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4161 return;
4162
4163 /* Promote the next entry by overwriting the deleted entry */
4164 if (!list_is_singular(&fib_node->entry_list)) {
4165 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4166 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4167
4168 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4169 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4170 return;
4171 }
4172
4173 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4174}
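/* Only the first entry in a FIB node's list is programmed to the device.
 * mlxsw_sp_fib_node_entry_add() and mlxsw_sp_fib_node_entry_del() above
 * therefore overwrite the previously offloaded entry (or promote the next
 * one) instead of deleting it first, so forwarding is not interrupted.
 */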
4175
4176static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004177 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004178 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004179{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004180 int err;
4181
Ido Schimmel9efbee62017-07-18 10:10:28 +02004182 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004183 if (err)
4184 return err;
4185
Ido Schimmel80c238f2017-07-18 10:10:29 +02004186 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004187 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004188 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004189
Ido Schimmel9aecce12017-02-09 10:28:42 +01004190 return 0;
4191
Ido Schimmel80c238f2017-07-18 10:10:29 +02004192err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004193 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004194 return err;
4195}
4196
4197static void
4198mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004199 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004200{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004201 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004202 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004203
4204 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4205 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004206}
4207
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004208static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004209 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004210 bool replace)
4211{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004212 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4213 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004214
4215 if (!replace)
4216 return;
4217
4218 /* We inserted the new entry before replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004219 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004220
4221 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4222 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004223 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004224}
4225
Ido Schimmel9aecce12017-02-09 10:28:42 +01004226static int
4227mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004228 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004229 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004230{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004231 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004232 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004233 int err;
4234
Ido Schimmel9011b672017-05-16 19:38:25 +02004235 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004236 return 0;
4237
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004238 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4239 &fen_info->dst, sizeof(fen_info->dst),
4240 fen_info->dst_len,
4241 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004242 if (IS_ERR(fib_node)) {
4243 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4244 return PTR_ERR(fib_node);
4245 }
4246
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004247 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4248 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004249 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004250 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004251 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004252 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004253
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004254 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004255 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004256 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004257 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4258 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004259 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004260
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004261 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004262
Jiri Pirko61c503f2016-07-04 08:23:11 +02004263 return 0;
4264
Ido Schimmel9aecce12017-02-09 10:28:42 +01004265err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004266 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004267err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004268 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004269 return err;
4270}
4271
Jiri Pirko37956d72016-10-20 16:05:43 +02004272static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4273 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004274{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004275 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004276 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004277
Ido Schimmel9011b672017-05-16 19:38:25 +02004278 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004279 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004280
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004281 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4282 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004283 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004284 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004285
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004286 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4287 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004288 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004289}
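/* mlxsw_sp_router_fib4_add() and mlxsw_sp_router_fib4_del() above are the
 * entry points invoked from the deferred FIB work: they look up or create
 * the FIB node for the prefix, create or destroy the entry, link or unlink
 * it, and drop the node reference once the entry is gone.
 */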
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004290
Ido Schimmel428b8512017-08-03 13:28:28 +02004291static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4292{
4293	/* Packets with a link-local destination IP arriving at the router
4294 * are trapped to the CPU, so no need to program specific routes
4295 * for them.
4296 */
4297 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4298 return true;
4299
4300 /* Multicast routes aren't supported, so ignore them. Neighbour
4301 * Discovery packets are specifically trapped.
4302 */
4303 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4304 return true;
4305
4306 /* Cloned routes are irrelevant in the forwarding path. */
4307 if (rt->rt6i_flags & RTF_CACHE)
4308 return true;
4309
4310 return false;
4311}
4312
4313static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4314{
4315 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4316
4317 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4318 if (!mlxsw_sp_rt6)
4319 return ERR_PTR(-ENOMEM);
4320
4321	/* In case of route replace, the replaced route is deleted with
4322	 * no notification. Take a reference to prevent accessing freed
4323 * memory.
4324 */
4325 mlxsw_sp_rt6->rt = rt;
4326 rt6_hold(rt);
4327
4328 return mlxsw_sp_rt6;
4329}
4330
4331#if IS_ENABLED(CONFIG_IPV6)
4332static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4333{
4334 rt6_release(rt);
4335}
4336#else
4337static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4338{
4339}
4340#endif
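/* The empty stub above presumably exists so that this file still builds
 * when CONFIG_IPV6 is disabled and rt6_release() is unavailable.
 */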
4341
4342static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4343{
4344 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4345 kfree(mlxsw_sp_rt6);
4346}
4347
4348static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4349{
4350 /* RTF_CACHE routes are ignored */
4351 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4352}
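/* A route may only be grouped into a multipath entry if it is a gateway
 * route that was not installed by address autoconfiguration (RTF_ADDRCONF).
 */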
4353
4354static struct rt6_info *
4355mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4356{
4357 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4358 list)->rt;
4359}
4360
4361static struct mlxsw_sp_fib6_entry *
4362mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004363 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004364{
4365 struct mlxsw_sp_fib6_entry *fib6_entry;
4366
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004367 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004368 return NULL;
4369
4370 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4371 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4372
4373 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4374 * virtual router.
4375 */
4376 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4377 continue;
4378 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4379 break;
4380 if (rt->rt6i_metric < nrt->rt6i_metric)
4381 continue;
4382 if (rt->rt6i_metric == nrt->rt6i_metric &&
4383 mlxsw_sp_fib6_rt_can_mp(rt))
4384 return fib6_entry;
4385 if (rt->rt6i_metric > nrt->rt6i_metric)
4386 break;
4387 }
4388
4389 return NULL;
4390}
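/* The lookup above returns an existing multipath-capable entry in the same
 * table and with the same metric as the new route, so the route can be
 * appended to that entry as an additional nexthop instead of creating a
 * new entry. A replace operation always gets a fresh entry (NULL here).
 */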
4391
4392static struct mlxsw_sp_rt6 *
4393mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4394 const struct rt6_info *rt)
4395{
4396 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4397
4398 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4399 if (mlxsw_sp_rt6->rt == rt)
4400 return mlxsw_sp_rt6;
4401 }
4402
4403 return NULL;
4404}
4405
Petr Machata8f28a302017-09-02 23:49:24 +02004406static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4407 const struct rt6_info *rt,
4408 enum mlxsw_sp_ipip_type *ret)
4409{
4410 return rt->dst.dev &&
4411 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4412}
4413
Petr Machata35225e42017-09-02 23:49:22 +02004414static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4415 struct mlxsw_sp_nexthop_group *nh_grp,
4416 struct mlxsw_sp_nexthop *nh,
4417 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004418{
Petr Machata8f28a302017-09-02 23:49:24 +02004419 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004420 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004421 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004422 struct mlxsw_sp_rif *rif;
4423 int err;
4424
Petr Machata8f28a302017-09-02 23:49:24 +02004425 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4426 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4427 MLXSW_SP_L3_PROTO_IPV6)) {
4428 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004429 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004430 if (err)
4431 return err;
4432 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4433 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004434 }
4435
Petr Machata35225e42017-09-02 23:49:22 +02004436 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004437 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4438 if (!rif)
4439 return 0;
4440 mlxsw_sp_nexthop_rif_init(nh, rif);
4441
4442 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4443 if (err)
4444 goto err_nexthop_neigh_init;
4445
4446 return 0;
4447
4448err_nexthop_neigh_init:
4449 mlxsw_sp_nexthop_rif_fini(nh);
4450 return err;
4451}
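/* Nexthop resolution above: if the egress device is an offloadable
 * IP-in-IP tunnel, the nexthop is bound to the tunnel's loopback RIF;
 * otherwise it is a regular Ethernet nexthop resolved through the egress
 * RIF and the neighbour table.
 */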
4452
Petr Machata35225e42017-09-02 23:49:22 +02004453static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4454 struct mlxsw_sp_nexthop *nh)
4455{
4456 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4457}
4458
4459static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4460 struct mlxsw_sp_nexthop_group *nh_grp,
4461 struct mlxsw_sp_nexthop *nh,
4462 const struct rt6_info *rt)
4463{
4464 struct net_device *dev = rt->dst.dev;
4465
4466 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004467 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004468 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004469 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004470
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004471 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4472
Petr Machata35225e42017-09-02 23:49:22 +02004473 if (!dev)
4474 return 0;
4475 nh->ifindex = dev->ifindex;
4476
4477 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4478}
4479
Ido Schimmel428b8512017-08-03 13:28:28 +02004480static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4481 struct mlxsw_sp_nexthop *nh)
4482{
Petr Machata35225e42017-09-02 23:49:22 +02004483 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004484 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004485 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004486}
4487
Petr Machataf6050ee2017-09-02 23:49:21 +02004488static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4489 const struct rt6_info *rt)
4490{
Petr Machata8f28a302017-09-02 23:49:24 +02004491 return rt->rt6i_flags & RTF_GATEWAY ||
4492 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004493}
4494
Ido Schimmel428b8512017-08-03 13:28:28 +02004495static struct mlxsw_sp_nexthop_group *
4496mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4497 struct mlxsw_sp_fib6_entry *fib6_entry)
4498{
4499 struct mlxsw_sp_nexthop_group *nh_grp;
4500 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4501 struct mlxsw_sp_nexthop *nh;
4502 size_t alloc_size;
4503 int i = 0;
4504 int err;
4505
4506 alloc_size = sizeof(*nh_grp) +
4507 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4508 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4509 if (!nh_grp)
4510 return ERR_PTR(-ENOMEM);
4511 INIT_LIST_HEAD(&nh_grp->fib_list);
4512#if IS_ENABLED(CONFIG_IPV6)
4513 nh_grp->neigh_tbl = &nd_tbl;
4514#endif
4515 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4516 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004517 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004518 nh_grp->count = fib6_entry->nrt6;
4519 for (i = 0; i < nh_grp->count; i++) {
4520 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4521
4522 nh = &nh_grp->nexthops[i];
4523 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4524 if (err)
4525 goto err_nexthop6_init;
4526 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4527 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004528
4529 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4530 if (err)
4531 goto err_nexthop_group_insert;
4532
Ido Schimmel428b8512017-08-03 13:28:28 +02004533 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4534 return nh_grp;
4535
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004536err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004537err_nexthop6_init:
4538 for (i--; i >= 0; i--) {
4539 nh = &nh_grp->nexthops[i];
4540 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4541 }
4542 kfree(nh_grp);
4543 return ERR_PTR(err);
4544}
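/* The group above is allocated with one nexthop per rt6_info of the entry
 * and is inserted into the router's nexthop group table, so that identical
 * IPv6 multipath routes can share a single adjacency group.
 */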
4545
4546static void
4547mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4548 struct mlxsw_sp_nexthop_group *nh_grp)
4549{
4550 struct mlxsw_sp_nexthop *nh;
4551 int i = nh_grp->count;
4552
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004553 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004554 for (i--; i >= 0; i--) {
4555 nh = &nh_grp->nexthops[i];
4556 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4557 }
4558 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4559 WARN_ON(nh_grp->adj_index_valid);
4560 kfree(nh_grp);
4561}
4562
4563static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4564 struct mlxsw_sp_fib6_entry *fib6_entry)
4565{
4566 struct mlxsw_sp_nexthop_group *nh_grp;
4567
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004568 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4569 if (!nh_grp) {
4570 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4571 if (IS_ERR(nh_grp))
4572 return PTR_ERR(nh_grp);
4573 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004574
4575 list_add_tail(&fib6_entry->common.nexthop_group_node,
4576 &nh_grp->fib_list);
4577 fib6_entry->common.nh_group = nh_grp;
4578
4579 return 0;
4580}
4581
4582static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4583 struct mlxsw_sp_fib_entry *fib_entry)
4584{
4585 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4586
4587 list_del(&fib_entry->nexthop_group_node);
4588 if (!list_empty(&nh_grp->fib_list))
4589 return;
4590 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4591}
4592
4593static int
4594mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4595 struct mlxsw_sp_fib6_entry *fib6_entry)
4596{
4597 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4598 int err;
4599
4600 fib6_entry->common.nh_group = NULL;
4601 list_del(&fib6_entry->common.nexthop_group_node);
4602
4603 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4604 if (err)
4605 goto err_nexthop6_group_get;
4606
4607	/* If this entry is offloaded, the adjacency index currently
4608	 * associated with it in the device's table is that of the old
4609	 * group. Start using the new one instead.
4610 */
4611 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4612 if (err)
4613 goto err_fib_node_entry_add;
4614
4615 if (list_empty(&old_nh_grp->fib_list))
4616 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4617
4618 return 0;
4619
4620err_fib_node_entry_add:
4621 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4622err_nexthop6_group_get:
4623 list_add_tail(&fib6_entry->common.nexthop_group_node,
4624 &old_nh_grp->fib_list);
4625 fib6_entry->common.nh_group = old_nh_grp;
4626 return err;
4627}
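/* Updating a group above: the entry is detached from its old group, a
 * group matching the new nexthop set is looked up or created, and the
 * entry is re-written to the device so it starts using the new adjacency
 * index. Only then is the old group destroyed, if no longer referenced;
 * on error the entry is re-attached to the old group.
 */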
4628
4629static int
4630mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4631 struct mlxsw_sp_fib6_entry *fib6_entry,
4632 struct rt6_info *rt)
4633{
4634 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4635 int err;
4636
4637 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4638 if (IS_ERR(mlxsw_sp_rt6))
4639 return PTR_ERR(mlxsw_sp_rt6);
4640
4641 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4642 fib6_entry->nrt6++;
4643
4644 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4645 if (err)
4646 goto err_nexthop6_group_update;
4647
4648 return 0;
4649
4650err_nexthop6_group_update:
4651 fib6_entry->nrt6--;
4652 list_del(&mlxsw_sp_rt6->list);
4653 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4654 return err;
4655}
4656
4657static void
4658mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4659 struct mlxsw_sp_fib6_entry *fib6_entry,
4660 struct rt6_info *rt)
4661{
4662 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4663
4664 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4665 if (WARN_ON(!mlxsw_sp_rt6))
4666 return;
4667
4668 fib6_entry->nrt6--;
4669 list_del(&mlxsw_sp_rt6->list);
4670 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4671 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4672}
4673
Petr Machataf6050ee2017-09-02 23:49:21 +02004674static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4675 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004676 const struct rt6_info *rt)
4677{
4678 /* Packets hitting RTF_REJECT routes need to be discarded by the
4679 * stack. We can rely on their destination device not having a
4680 * RIF (it's the loopback device) and can thus use action type
4681 * local, which will cause them to be trapped with a lower
4682 * priority than packets that need to be locally received.
4683 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004684 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004685 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4686 else if (rt->rt6i_flags & RTF_REJECT)
4687 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004688 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004689 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4690 else
4691 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4692}
4693
4694static void
4695mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4696{
4697 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4698
4699 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4700 list) {
4701 fib6_entry->nrt6--;
4702 list_del(&mlxsw_sp_rt6->list);
4703 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4704 }
4705}
4706
4707static struct mlxsw_sp_fib6_entry *
4708mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4709 struct mlxsw_sp_fib_node *fib_node,
4710 struct rt6_info *rt)
4711{
4712 struct mlxsw_sp_fib6_entry *fib6_entry;
4713 struct mlxsw_sp_fib_entry *fib_entry;
4714 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4715 int err;
4716
4717 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4718 if (!fib6_entry)
4719 return ERR_PTR(-ENOMEM);
4720 fib_entry = &fib6_entry->common;
4721
4722 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4723 if (IS_ERR(mlxsw_sp_rt6)) {
4724 err = PTR_ERR(mlxsw_sp_rt6);
4725 goto err_rt6_create;
4726 }
4727
Petr Machataf6050ee2017-09-02 23:49:21 +02004728 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004729
4730 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4731 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4732 fib6_entry->nrt6 = 1;
4733 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4734 if (err)
4735 goto err_nexthop6_group_get;
4736
4737 fib_entry->fib_node = fib_node;
4738
4739 return fib6_entry;
4740
4741err_nexthop6_group_get:
4742 list_del(&mlxsw_sp_rt6->list);
4743 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4744err_rt6_create:
4745 kfree(fib6_entry);
4746 return ERR_PTR(err);
4747}
4748
4749static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4750 struct mlxsw_sp_fib6_entry *fib6_entry)
4751{
4752 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4753 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4754 WARN_ON(fib6_entry->nrt6);
4755 kfree(fib6_entry);
4756}
4757
4758static struct mlxsw_sp_fib6_entry *
4759mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004760 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004761{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004762 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004763
4764 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4765 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4766
4767 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4768 continue;
4769 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4770 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004771 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4772 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4773 mlxsw_sp_fib6_rt_can_mp(nrt))
4774 return fib6_entry;
4775 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4776 fallback = fallback ?: fib6_entry;
4777 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004778 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004779 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004780 }
4781
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004782 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004783}
4784
4785static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004786mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4787 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004788{
4789 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4790 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4791 struct mlxsw_sp_fib6_entry *fib6_entry;
4792
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004793 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4794
4795 if (replace && WARN_ON(!fib6_entry))
4796 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004797
4798 if (fib6_entry) {
4799 list_add_tail(&new6_entry->common.list,
4800 &fib6_entry->common.list);
4801 } else {
4802 struct mlxsw_sp_fib6_entry *last;
4803
4804 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4805 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4806
4807 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4808 break;
4809 fib6_entry = last;
4810 }
4811
4812 if (fib6_entry)
4813 list_add(&new6_entry->common.list,
4814 &fib6_entry->common.list);
4815 else
4816 list_add(&new6_entry->common.list,
4817 &fib_node->entry_list);
4818 }
4819
4820 return 0;
4821}
4822
4823static void
4824mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4825{
4826 list_del(&fib6_entry->common.list);
4827}
4828
4829static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004830 struct mlxsw_sp_fib6_entry *fib6_entry,
4831 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004832{
4833 int err;
4834
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004835 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004836 if (err)
4837 return err;
4838
4839 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4840 if (err)
4841 goto err_fib_node_entry_add;
4842
4843 return 0;
4844
4845err_fib_node_entry_add:
4846 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4847 return err;
4848}
4849
4850static void
4851mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4852 struct mlxsw_sp_fib6_entry *fib6_entry)
4853{
4854 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4855 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4856}
4857
4858static struct mlxsw_sp_fib6_entry *
4859mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4860 const struct rt6_info *rt)
4861{
4862 struct mlxsw_sp_fib6_entry *fib6_entry;
4863 struct mlxsw_sp_fib_node *fib_node;
4864 struct mlxsw_sp_fib *fib;
4865 struct mlxsw_sp_vr *vr;
4866
4867 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4868 if (!vr)
4869 return NULL;
4870 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4871
4872 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4873 sizeof(rt->rt6i_dst.addr),
4874 rt->rt6i_dst.plen);
4875 if (!fib_node)
4876 return NULL;
4877
4878 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4879 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4880
4881 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4882 rt->rt6i_metric == iter_rt->rt6i_metric &&
4883 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4884 return fib6_entry;
4885 }
4886
4887 return NULL;
4888}
4889
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004890static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4891 struct mlxsw_sp_fib6_entry *fib6_entry,
4892 bool replace)
4893{
4894 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4895 struct mlxsw_sp_fib6_entry *replaced;
4896
4897 if (!replace)
4898 return;
4899
4900 replaced = list_next_entry(fib6_entry, common.list);
4901
4902 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4903 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4904 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4905}
4906
Ido Schimmel428b8512017-08-03 13:28:28 +02004907static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004908 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004909{
4910 struct mlxsw_sp_fib6_entry *fib6_entry;
4911 struct mlxsw_sp_fib_node *fib_node;
4912 int err;
4913
4914 if (mlxsw_sp->router->aborted)
4915 return 0;
4916
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004917 if (rt->rt6i_src.plen)
4918 return -EINVAL;
4919
Ido Schimmel428b8512017-08-03 13:28:28 +02004920 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4921 return 0;
4922
4923 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4924 &rt->rt6i_dst.addr,
4925 sizeof(rt->rt6i_dst.addr),
4926 rt->rt6i_dst.plen,
4927 MLXSW_SP_L3_PROTO_IPV6);
4928 if (IS_ERR(fib_node))
4929 return PTR_ERR(fib_node);
4930
4931 /* Before creating a new entry, try to append route to an existing
4932 * multipath entry.
4933 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004934 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004935 if (fib6_entry) {
4936 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4937 if (err)
4938 goto err_fib6_entry_nexthop_add;
4939 return 0;
4940 }
4941
4942 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4943 if (IS_ERR(fib6_entry)) {
4944 err = PTR_ERR(fib6_entry);
4945 goto err_fib6_entry_create;
4946 }
4947
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004948 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004949 if (err)
4950 goto err_fib6_node_entry_link;
4951
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004952 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4953
Ido Schimmel428b8512017-08-03 13:28:28 +02004954 return 0;
4955
4956err_fib6_node_entry_link:
4957 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4958err_fib6_entry_create:
4959err_fib6_entry_nexthop_add:
4960 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4961 return err;
4962}
4963
4964static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4965 struct rt6_info *rt)
4966{
4967 struct mlxsw_sp_fib6_entry *fib6_entry;
4968 struct mlxsw_sp_fib_node *fib_node;
4969
4970 if (mlxsw_sp->router->aborted)
4971 return;
4972
4973 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4974 return;
4975
4976 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4977 if (WARN_ON(!fib6_entry))
4978 return;
4979
4980	/* If the route is part of a multipath entry, but is not the last
4981	 * one to be removed, then only shrink the entry's nexthop group.
4982 */
4983 if (!list_is_singular(&fib6_entry->rt6_list)) {
4984 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
4985 return;
4986 }
4987
4988 fib_node = fib6_entry->common.fib_node;
4989
4990 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
4991 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4992 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4993}
4994
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004995static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
4996 enum mlxsw_reg_ralxx_protocol proto,
4997 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004998{
4999 char ralta_pl[MLXSW_REG_RALTA_LEN];
5000 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005001 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005002
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005003 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005004 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5005 if (err)
5006 return err;
5007
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005008 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005009 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5010 if (err)
5011 return err;
5012
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005013 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005014 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005015 char raltb_pl[MLXSW_REG_RALTB_LEN];
5016 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005017
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005018 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005019 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5020 raltb_pl);
5021 if (err)
5022 return err;
5023
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005024 mlxsw_reg_ralue_pack(ralue_pl, proto,
5025 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005026 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5027 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5028 ralue_pl);
5029 if (err)
5030 return err;
5031 }
5032
5033 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005034}
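/* Abort trap programming above: an LPM tree is created and bound to every
 * virtual router, and a default (prefix length 0) route with an IP2ME
 * action is installed in each, so that after an abort all routed packets
 * are trapped to the CPU and handled by the kernel instead.
 */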
5035
Yotam Gigid42b0962017-09-27 08:23:20 +02005036static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5037 struct mfc_entry_notifier_info *men_info,
5038 bool replace)
5039{
5040 struct mlxsw_sp_vr *vr;
5041
5042 if (mlxsw_sp->router->aborted)
5043 return 0;
5044
David Ahernf8fa9b42017-10-18 09:56:56 -07005045 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005046 if (IS_ERR(vr))
5047 return PTR_ERR(vr);
5048
5049 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5050}
5051
5052static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5053 struct mfc_entry_notifier_info *men_info)
5054{
5055 struct mlxsw_sp_vr *vr;
5056
5057 if (mlxsw_sp->router->aborted)
5058 return;
5059
5060 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5061 if (WARN_ON(!vr))
5062 return;
5063
5064 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5065 mlxsw_sp_vr_put(vr);
5066}
5067
5068static int
5069mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5070 struct vif_entry_notifier_info *ven_info)
5071{
5072 struct mlxsw_sp_rif *rif;
5073 struct mlxsw_sp_vr *vr;
5074
5075 if (mlxsw_sp->router->aborted)
5076 return 0;
5077
David Ahernf8fa9b42017-10-18 09:56:56 -07005078 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005079 if (IS_ERR(vr))
5080 return PTR_ERR(vr);
5081
5082 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5083 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5084 ven_info->vif_index,
5085 ven_info->vif_flags, rif);
5086}
5087
5088static void
5089mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5090 struct vif_entry_notifier_info *ven_info)
5091{
5092 struct mlxsw_sp_vr *vr;
5093
5094 if (mlxsw_sp->router->aborted)
5095 return;
5096
5097 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5098 if (WARN_ON(!vr))
5099 return;
5100
5101 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5102 mlxsw_sp_vr_put(vr);
5103}
5104
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005105static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5106{
5107 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5108 int err;
5109
5110 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5111 MLXSW_SP_LPM_TREE_MIN);
5112 if (err)
5113 return err;
5114
Yotam Gigid42b0962017-09-27 08:23:20 +02005115	/* The multicast router code does not need an abort trap because, by default,
5116 * packets that don't match any routes are trapped to the CPU.
5117 */
5118
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005119 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5120 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5121 MLXSW_SP_LPM_TREE_MIN + 1);
5122}
5123
Ido Schimmel9aecce12017-02-09 10:28:42 +01005124static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5125 struct mlxsw_sp_fib_node *fib_node)
5126{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005127 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005128
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005129 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5130 common.list) {
5131 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005132
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005133 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5134 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005135 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005136 /* Break when entry list is empty and node was freed.
5137 * Otherwise, we'll access freed memory in the next
5138 * iteration.
5139 */
5140 if (do_break)
5141 break;
5142 }
5143}
5144
Ido Schimmel428b8512017-08-03 13:28:28 +02005145static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5146 struct mlxsw_sp_fib_node *fib_node)
5147{
5148 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5149
5150 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5151 common.list) {
5152 bool do_break = &tmp->common.list == &fib_node->entry_list;
5153
5154 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5155 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5156 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5157 if (do_break)
5158 break;
5159 }
5160}
5161
Ido Schimmel9aecce12017-02-09 10:28:42 +01005162static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5163 struct mlxsw_sp_fib_node *fib_node)
5164{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005165 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005166 case MLXSW_SP_L3_PROTO_IPV4:
5167 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5168 break;
5169 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005170 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005171 break;
5172 }
5173}
5174
Ido Schimmel76610eb2017-03-10 08:53:41 +01005175static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5176 struct mlxsw_sp_vr *vr,
5177 enum mlxsw_sp_l3proto proto)
5178{
5179 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5180 struct mlxsw_sp_fib_node *fib_node, *tmp;
5181
5182 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5183 bool do_break = &tmp->list == &fib->node_list;
5184
5185 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5186 if (do_break)
5187 break;
5188 }
5189}
5190
Ido Schimmelac571de2016-11-14 11:26:32 +01005191static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005192{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005193 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005194
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005195 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005196 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005197
Ido Schimmel76610eb2017-03-10 08:53:41 +01005198 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005199 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005200
5201 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005202 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005203
5204		/* If the virtual router was only used for IPv4, then after the
5205		 * flush above it is no longer in use, so skip the IPv6 flush.
5206 */
5207 if (!mlxsw_sp_vr_is_used(vr))
5208 continue;
5209 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005210 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005211}
5212
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005213static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005214{
5215 int err;
5216
Ido Schimmel9011b672017-05-16 19:38:25 +02005217 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005218 return;
5219 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005220 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005221 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005222 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5223 if (err)
5224 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5225}
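/* The abort above is a one-shot mechanism: all offloaded routes are
 * flushed, the traps installed by mlxsw_sp_router_set_abort_trap() punt
 * traffic to the CPU, and subsequent FIB events become no-ops for as long
 * as router->aborted remains set (it is never cleared in this file).
 */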
5226
Ido Schimmel30572242016-12-03 16:45:01 +01005227struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005228 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005229 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005230 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005231 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005232 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005233 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005234 struct mfc_entry_notifier_info men_info;
5235 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005236 };
Ido Schimmel30572242016-12-03 16:45:01 +01005237 struct mlxsw_sp *mlxsw_sp;
5238 unsigned long event;
5239};
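/* FIB notifications are delivered in an atomic context, so the notifier
 * handler copies the notifier info into a work item (taking references on
 * the fib_info/rt6_info/mfc_cache/netdev as needed) and processes it later
 * in process context under RTNL.
 */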
5240
Ido Schimmel66a57632017-08-03 13:28:26 +02005241static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005242{
Ido Schimmel30572242016-12-03 16:45:01 +01005243 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005244 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005245 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005246 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005247 int err;
5248
Ido Schimmel30572242016-12-03 16:45:01 +01005249 /* Protect internal structures from changes */
5250 rtnl_lock();
5251 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005252 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005253 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005254 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005255 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005256 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5257 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005258 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005259 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005260 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005261 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005262 break;
5263 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005264 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5265 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005266 break;
David Ahern1f279232017-10-27 17:37:14 -07005267 case FIB_EVENT_RULE_ADD:
5268		/* If we get here, a rule was added that we do not support,
5269		 * so abort FIB offload.
5270 */
5271 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005272 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005273 case FIB_EVENT_NH_ADD: /* fall through */
5274 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005275 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5276 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005277 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5278 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005279 }
Ido Schimmel30572242016-12-03 16:45:01 +01005280 rtnl_unlock();
5281 kfree(fib_work);
5282}
5283
Ido Schimmel66a57632017-08-03 13:28:26 +02005284static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5285{
Ido Schimmel583419f2017-08-03 13:28:27 +02005286 struct mlxsw_sp_fib_event_work *fib_work =
5287 container_of(work, struct mlxsw_sp_fib_event_work, work);
5288 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005289 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005290 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005291
5292 rtnl_lock();
5293 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005294 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005295 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005296 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005297 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005298 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005299 if (err)
5300 mlxsw_sp_router_fib_abort(mlxsw_sp);
5301 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5302 break;
5303 case FIB_EVENT_ENTRY_DEL:
5304 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5305 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5306 break;
David Ahern1f279232017-10-27 17:37:14 -07005307 case FIB_EVENT_RULE_ADD:
5308		/* If we get here, a rule was added that we do not support,
5309		 * so abort FIB offload.
5310 */
5311 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005312 break;
5313 }
5314 rtnl_unlock();
5315 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005316}
5317
Yotam Gigid42b0962017-09-27 08:23:20 +02005318static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5319{
5320 struct mlxsw_sp_fib_event_work *fib_work =
5321 container_of(work, struct mlxsw_sp_fib_event_work, work);
5322 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005323 bool replace;
5324 int err;
5325
5326 rtnl_lock();
5327 switch (fib_work->event) {
5328 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5329 case FIB_EVENT_ENTRY_ADD:
5330 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5331
5332 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5333 replace);
5334 if (err)
5335 mlxsw_sp_router_fib_abort(mlxsw_sp);
5336 ipmr_cache_put(fib_work->men_info.mfc);
5337 break;
5338 case FIB_EVENT_ENTRY_DEL:
5339 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5340 ipmr_cache_put(fib_work->men_info.mfc);
5341 break;
5342 case FIB_EVENT_VIF_ADD:
5343 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5344 &fib_work->ven_info);
5345 if (err)
5346 mlxsw_sp_router_fib_abort(mlxsw_sp);
5347 dev_put(fib_work->ven_info.dev);
5348 break;
5349 case FIB_EVENT_VIF_DEL:
5350 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5351 &fib_work->ven_info);
5352 dev_put(fib_work->ven_info.dev);
5353 break;
David Ahern1f279232017-10-27 17:37:14 -07005354 case FIB_EVENT_RULE_ADD:
5355		/* If we get here, a rule was added that we do not support,
5356		 * so abort FIB offload.
5357 */
5358 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005359 break;
5360 }
5361 rtnl_unlock();
5362 kfree(fib_work);
5363}
5364
Ido Schimmel66a57632017-08-03 13:28:26 +02005365static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5366 struct fib_notifier_info *info)
5367{
David Ahern3c75f9b2017-10-18 15:01:38 -07005368 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005369 struct fib_nh_notifier_info *fnh_info;
5370
Ido Schimmel66a57632017-08-03 13:28:26 +02005371 switch (fib_work->event) {
5372 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5373 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5374 case FIB_EVENT_ENTRY_ADD: /* fall through */
5375 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005376 fen_info = container_of(info, struct fib_entry_notifier_info,
5377 info);
5378 fib_work->fen_info = *fen_info;
5379 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005380 * freed while work is queued. Release it afterwards.
5381 */
5382 fib_info_hold(fib_work->fen_info.fi);
5383 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005384 case FIB_EVENT_NH_ADD: /* fall through */
5385 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005386 fnh_info = container_of(info, struct fib_nh_notifier_info,
5387 info);
5388 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005389 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5390 break;
5391 }
5392}
5393
5394static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5395 struct fib_notifier_info *info)
5396{
David Ahern3c75f9b2017-10-18 15:01:38 -07005397 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005398
Ido Schimmel583419f2017-08-03 13:28:27 +02005399 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005400 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005401 case FIB_EVENT_ENTRY_ADD: /* fall through */
5402 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005403 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5404 info);
5405 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005406 rt6_hold(fib_work->fen6_info.rt);
5407 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005408 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005409}
5410
Yotam Gigid42b0962017-09-27 08:23:20 +02005411static void
5412mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5413 struct fib_notifier_info *info)
5414{
5415 switch (fib_work->event) {
5416 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5417 case FIB_EVENT_ENTRY_ADD: /* fall through */
5418 case FIB_EVENT_ENTRY_DEL:
5419 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5420 ipmr_cache_hold(fib_work->men_info.mfc);
5421 break;
5422 case FIB_EVENT_VIF_ADD: /* fall through */
5423 case FIB_EVENT_VIF_DEL:
5424 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5425 dev_hold(fib_work->ven_info.dev);
5426 break;
David Ahern1f279232017-10-27 17:37:14 -07005427 }
5428}
5429
5430static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5431 struct fib_notifier_info *info,
5432 struct mlxsw_sp *mlxsw_sp)
5433{
5434 struct netlink_ext_ack *extack = info->extack;
5435 struct fib_rule_notifier_info *fr_info;
5436 struct fib_rule *rule;
5437 int err = 0;
5438
5439 /* nothing to do at the moment */
5440 if (event == FIB_EVENT_RULE_DEL)
5441 return 0;
5442
5443 if (mlxsw_sp->router->aborted)
5444 return 0;
5445
5446 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5447 rule = fr_info->rule;
5448
5449 switch (info->family) {
5450 case AF_INET:
5451 if (!fib4_rule_default(rule) && !rule->l3mdev)
5452 err = -1;
5453 break;
5454 case AF_INET6:
5455 if (!fib6_rule_default(rule) && !rule->l3mdev)
5456 err = -1;
5457 break;
5458 case RTNL_FAMILY_IPMR:
5459 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5460 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005461 break;
5462 }
David Ahern1f279232017-10-27 17:37:14 -07005463
5464 if (err < 0)
5465 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5466
5467 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005468}
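/* Rule handling above: only default FIB rules and l3mdev (VRF) rules are
 * supported. Any other rule makes this helper return an error, in which
 * case the notifier queues the event and the per-family work handler
 * triggers mlxsw_sp_router_fib_abort().
 */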
5469
Ido Schimmel30572242016-12-03 16:45:01 +01005470/* Called with rcu_read_lock() */
5471static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5472 unsigned long event, void *ptr)
5473{
Ido Schimmel30572242016-12-03 16:45:01 +01005474 struct mlxsw_sp_fib_event_work *fib_work;
5475 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005476 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005477 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005478
Ido Schimmel8e29f972017-09-15 15:31:07 +02005479 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005480 (info->family != AF_INET && info->family != AF_INET6 &&
5481 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005482 return NOTIFY_DONE;
5483
David Ahern1f279232017-10-27 17:37:14 -07005484 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5485
5486 switch (event) {
5487 case FIB_EVENT_RULE_ADD: /* fall through */
5488 case FIB_EVENT_RULE_DEL:
5489 err = mlxsw_sp_router_fib_rule_event(event, info,
5490 router->mlxsw_sp);
5491 if (!err)
5492 return NOTIFY_DONE;
5493 }
5494
Ido Schimmel30572242016-12-03 16:45:01 +01005495 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5496 if (WARN_ON(!fib_work))
5497 return NOTIFY_BAD;
5498
Ido Schimmel7e39d112017-05-16 19:38:28 +02005499 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005500 fib_work->event = event;
5501
Ido Schimmel66a57632017-08-03 13:28:26 +02005502 switch (info->family) {
5503 case AF_INET:
5504 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5505 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005506 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005507 case AF_INET6:
5508 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5509 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005510 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005511 case RTNL_FAMILY_IPMR:
5512 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5513 mlxsw_sp_router_fibmr_event(fib_work, info);
5514 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005515 }
5516
Ido Schimmela0e47612017-02-06 16:20:10 +01005517 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005518
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005519 return NOTIFY_DONE;
5520}
5521
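/* RIFs are kept in a flat array indexed by RIF index; lookups by netdev
 * are a linear scan over up to MAX_RIFS entries.
 */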
Ido Schimmel4724ba562017-03-10 08:53:39 +01005522static struct mlxsw_sp_rif *
5523mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5524 const struct net_device *dev)
5525{
5526 int i;
5527
5528 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005529 if (mlxsw_sp->router->rifs[i] &&
5530 mlxsw_sp->router->rifs[i]->dev == dev)
5531 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005532
5533 return NULL;
5534}
5535
5536static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5537{
5538 char ritr_pl[MLXSW_REG_RITR_LEN];
5539 int err;
5540
5541 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5542 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5543 if (WARN_ON_ONCE(err))
5544 return err;
5545
5546 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5547 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5548}
5549
5550static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005551 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005552{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005553 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5554 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5555 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005556}
5557
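/* Decide whether an inetaddr/inet6addr event should (de)configure a RIF:
 * on NETDEV_UP a RIF is needed only if one does not exist yet; on
 * NETDEV_DOWN the RIF is removed only once both the IPv4 and IPv6 address
 * lists are empty and the netdev is not enslaved to an l3mdev.
 */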
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005558static bool
5559mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5560 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005561{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005562 struct inet6_dev *inet6_dev;
5563 bool addr_list_empty = true;
5564 struct in_device *idev;
5565
Ido Schimmel4724ba562017-03-10 08:53:39 +01005566 switch (event) {
5567 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005568 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005569 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005570 idev = __in_dev_get_rtnl(dev);
5571 if (idev && idev->ifa_list)
5572 addr_list_empty = false;
5573
5574 inet6_dev = __in6_dev_get(dev);
5575 if (addr_list_empty && inet6_dev &&
5576 !list_empty(&inet6_dev->addr_list))
5577 addr_list_empty = false;
5578
5579 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005580 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005581 return true;
5582 /* It is possible we already removed the RIF ourselves
5583 * if it was assigned to a netdev that is now a bridge
5584 * or LAG slave.
5585 */
5586 return false;
5587 }
5588
5589 return false;
5590}
5591
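/* The RIF type follows the netdev: IP-in-IP tunnels get a loopback RIF,
 * VLAN devices on top of a bridge and VLAN-aware bridges get a VLAN RIF
 * (802.1Q FID), VLAN-unaware bridges get a FID RIF (802.1D FID), and
 * everything else (ports, LAGs and their VLAN uppers) gets a sub-port RIF
 * backed by an rFID.
 */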
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005592static enum mlxsw_sp_rif_type
5593mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5594 const struct net_device *dev)
5595{
5596 enum mlxsw_sp_fid_type type;
5597
Petr Machata6ddb7422017-09-02 23:49:19 +02005598 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5599 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5600
5601 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005602 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5603 type = MLXSW_SP_FID_TYPE_8021Q;
5604 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5605 type = MLXSW_SP_FID_TYPE_8021Q;
5606 else if (netif_is_bridge_master(dev))
5607 type = MLXSW_SP_FID_TYPE_8021D;
5608 else
5609 type = MLXSW_SP_FID_TYPE_RFID;
5610
5611 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5612}
5613
Ido Schimmelde5ed992017-06-04 16:53:40 +02005614static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005615{
5616 int i;
5617
Ido Schimmelde5ed992017-06-04 16:53:40 +02005618 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5619 if (!mlxsw_sp->router->rifs[i]) {
5620 *p_rif_index = i;
5621 return 0;
5622 }
5623 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005624
Ido Schimmelde5ed992017-06-04 16:53:40 +02005625 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005626}
5627
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005628static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5629 u16 vr_id,
5630 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005631{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005632 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005633
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005634 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005635 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005636 return NULL;
5637
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005638 INIT_LIST_HEAD(&rif->nexthop_list);
5639 INIT_LIST_HEAD(&rif->neigh_list);
5640 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5641 rif->mtu = l3_dev->mtu;
5642 rif->vr_id = vr_id;
5643 rif->dev = l3_dev;
5644 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005645
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005646 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005647}
5648
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005649struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5650 u16 rif_index)
5651{
5652 return mlxsw_sp->router->rifs[rif_index];
5653}
5654
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005655u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5656{
5657 return rif->rif_index;
5658}
5659
Petr Machata92107cf2017-09-02 23:49:28 +02005660u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5661{
5662 return lb_rif->common.rif_index;
5663}
5664
5665u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5666{
5667 return lb_rif->ul_vr_id;
5668}
5669
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005670int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5671{
5672 return rif->dev->ifindex;
5673}
5674
Yotam Gigi91e4d592017-09-19 10:00:19 +02005675const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5676{
5677 return rif->dev;
5678}
5679
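/* RIF creation: take a reference on the virtual router bound to the
 * netdev's FIB table, allocate a free RIF index, allocate the type-specific
 * RIF structure, take the backing FID (if the type provides one), run the
 * type-specific setup/configure callbacks and finally register the RIF with
 * the VR's multicast routing table. Errors unwind in reverse order.
 */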
Ido Schimmel4724ba562017-03-10 08:53:39 +01005680static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005681mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005682 const struct mlxsw_sp_rif_params *params,
5683 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005684{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005685 u32 tb_id = l3mdev_fib_table(params->dev);
5686 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005687 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005688 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005689 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005690 struct mlxsw_sp_vr *vr;
5691 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005692 int err;
5693
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005694 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5695 ops = mlxsw_sp->router->rif_ops_arr[type];
5696
David Ahernf8fa9b42017-10-18 09:56:56 -07005697 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005698 if (IS_ERR(vr))
5699 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005700 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005701
Ido Schimmelde5ed992017-06-04 16:53:40 +02005702 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005703 if (err) {
5704 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005705 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005706 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005707
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005708 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005709 if (!rif) {
5710 err = -ENOMEM;
5711 goto err_rif_alloc;
5712 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005713 rif->mlxsw_sp = mlxsw_sp;
5714 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005715
Petr Machata010cadf2017-09-02 23:49:18 +02005716 if (ops->fid_get) {
5717 fid = ops->fid_get(rif);
5718 if (IS_ERR(fid)) {
5719 err = PTR_ERR(fid);
5720 goto err_fid_get;
5721 }
5722 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005723 }
5724
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005725 if (ops->setup)
5726 ops->setup(rif, params);
5727
5728 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005729 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005730 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005731
Yotam Gigid42b0962017-09-27 08:23:20 +02005732 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5733 if (err)
5734 goto err_mr_rif_add;
5735
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005736 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005737 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005738
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005739 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005740
Yotam Gigid42b0962017-09-27 08:23:20 +02005741err_mr_rif_add:
5742 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005743err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005744 if (fid)
5745 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005746err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005747 kfree(rif);
5748err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005749err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005750 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005751 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005752 return ERR_PTR(err);
5753}
5754
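/* RIF destruction mirrors mlxsw_sp_rif_create() in reverse: flush nexthops
 * and neighbours that used the RIF, unregister it from the multicast
 * routing table, run the type-specific deconfigure callback, release the
 * FID (loopback RIFs have none) and drop the virtual router reference.
 */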
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005755void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005756{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005757 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5758 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005759 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005760 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005761
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005762 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005763 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005764
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005765 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005766 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005767 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005768 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005769 if (fid)
5770 /* Loopback RIFs are not associated with a FID. */
5771 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005772 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005773 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005774 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005775}
5776
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005777static void
5778mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5779 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5780{
5781 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5782
5783 params->vid = mlxsw_sp_port_vlan->vid;
5784 params->lag = mlxsw_sp_port->lagged;
5785 if (params->lag)
5786 params->lag_id = mlxsw_sp_port->lag_id;
5787 else
5788 params->system_port = mlxsw_sp_port->local_port;
5789}
5790
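/* Join a {port, VID} to the router: create (or reuse) the sub-port RIF for
 * the L3 netdev, map the {port, VID} to the RIF's rFID, disable learning on
 * the VID and force its STP state to forwarding, as the traffic is routed
 * from this point on.
 */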
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005791static int
Ido Schimmela1107482017-05-26 08:37:39 +02005792mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005793 struct net_device *l3_dev,
5794 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005795{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005796 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005797 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005798 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005799 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005800 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005801 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005802
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005803 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005804 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005805 struct mlxsw_sp_rif_params params = {
5806 .dev = l3_dev,
5807 };
5808
5809 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005810 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005811 if (IS_ERR(rif))
5812 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005813 }
5814
Ido Schimmela1107482017-05-26 08:37:39 +02005815 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005816 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005817 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5818 if (err)
5819 goto err_fid_port_vid_map;
5820
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005821 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005822 if (err)
5823 goto err_port_vid_learning_set;
5824
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005825 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005826 BR_STATE_FORWARDING);
5827 if (err)
5828 goto err_port_vid_stp_set;
5829
Ido Schimmela1107482017-05-26 08:37:39 +02005830 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005831
Ido Schimmel4724ba562017-03-10 08:53:39 +01005832 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005833
5834err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005835 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005836err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005837 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5838err_fid_port_vid_map:
5839 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005840 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005841}
5842
Ido Schimmela1107482017-05-26 08:37:39 +02005843void
5844mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005845{
Ido Schimmelce95e152017-05-26 08:37:27 +02005846 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005847 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005848 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005849
Ido Schimmela1107482017-05-26 08:37:39 +02005850 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5851 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005852
Ido Schimmela1107482017-05-26 08:37:39 +02005853 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005854 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5855 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005856 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5857	/* If the router port holds the last reference on the rFID, then the
5858 * associated Sub-port RIF will be destroyed.
5859 */
5860 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005861}
5862
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005863static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5864 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005865 unsigned long event, u16 vid,
5866 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005867{
5868 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005869 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005870
Ido Schimmelce95e152017-05-26 08:37:27 +02005871 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005872 if (WARN_ON(!mlxsw_sp_port_vlan))
5873 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005874
5875 switch (event) {
5876 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005877 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005878 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005879 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005880 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005881 break;
5882 }
5883
5884 return 0;
5885}
5886
5887static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005888 unsigned long event,
5889 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005890{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005891 if (netif_is_bridge_port(port_dev) ||
5892 netif_is_lag_port(port_dev) ||
5893 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005894 return 0;
5895
David Ahernf8fa9b42017-10-18 09:56:56 -07005896 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5897 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005898}
5899
5900static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5901 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005902 unsigned long event, u16 vid,
5903 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005904{
5905 struct net_device *port_dev;
5906 struct list_head *iter;
5907 int err;
5908
5909 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5910 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005911 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5912 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005913 event, vid,
5914 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005915 if (err)
5916 return err;
5917 }
5918 }
5919
5920 return 0;
5921}
5922
5923static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005924 unsigned long event,
5925 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005926{
5927 if (netif_is_bridge_port(lag_dev))
5928 return 0;
5929
David Ahernf8fa9b42017-10-18 09:56:56 -07005930 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5931 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005932}
5933
Ido Schimmel4724ba562017-03-10 08:53:39 +01005934static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005935 unsigned long event,
5936 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005937{
5938 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005939 struct mlxsw_sp_rif_params params = {
5940 .dev = l3_dev,
5941 };
Ido Schimmela1107482017-05-26 08:37:39 +02005942 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005943
5944 switch (event) {
5945 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07005946 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005947 if (IS_ERR(rif))
5948 return PTR_ERR(rif);
5949 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005950 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005951 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005952 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005953 break;
5954 }
5955
5956 return 0;
5957}
5958
5959static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005960 unsigned long event,
5961 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005962{
5963 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005964 u16 vid = vlan_dev_vlan_id(vlan_dev);
5965
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03005966 if (netif_is_bridge_port(vlan_dev))
5967 return 0;
5968
Ido Schimmel4724ba562017-03-10 08:53:39 +01005969 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005970 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005971 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005972 else if (netif_is_lag_master(real_dev))
5973 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07005974 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02005975 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005976 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005977
5978 return 0;
5979}
5980
Ido Schimmelb1e45522017-04-30 19:47:14 +03005981static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005982 unsigned long event,
5983 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03005984{
5985 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005986 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005987 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005988 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005989 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005990 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005991 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005992 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005993 else
5994 return 0;
5995}
5996
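/* IPv4 address notifier. Address additions (NETDEV_UP) are handled by the
 * validator notifier below, where failures can still be vetoed and reported
 * via extack; this handler only processes address removal.
 */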
Ido Schimmel4724ba562017-03-10 08:53:39 +01005997int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
5998 unsigned long event, void *ptr)
5999{
6000 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6001 struct net_device *dev = ifa->ifa_dev->dev;
6002 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006003 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006004 int err = 0;
6005
David Ahern89d5dd22017-10-18 09:56:55 -07006006 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6007 if (event == NETDEV_UP)
6008 goto out;
6009
6010 mlxsw_sp = mlxsw_sp_lower_get(dev);
6011 if (!mlxsw_sp)
6012 goto out;
6013
6014 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6015 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6016 goto out;
6017
David Ahernf8fa9b42017-10-18 09:56:56 -07006018 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006019out:
6020 return notifier_from_errno(err);
6021}
6022
6023int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6024 unsigned long event, void *ptr)
6025{
6026 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6027 struct net_device *dev = ivi->ivi_dev->dev;
6028 struct mlxsw_sp *mlxsw_sp;
6029 struct mlxsw_sp_rif *rif;
6030 int err = 0;
6031
Ido Schimmel4724ba562017-03-10 08:53:39 +01006032 mlxsw_sp = mlxsw_sp_lower_get(dev);
6033 if (!mlxsw_sp)
6034 goto out;
6035
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006036 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006037 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006038 goto out;
6039
David Ahernf8fa9b42017-10-18 09:56:56 -07006040 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006041out:
6042 return notifier_from_errno(err);
6043}
6044
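/* Unlike the IPv4 notifier, the inet6addr chain is atomic, so IPv6 address
 * removal is deferred to a work item that takes RTNL before touching the
 * RIF.
 */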
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006045struct mlxsw_sp_inet6addr_event_work {
6046 struct work_struct work;
6047 struct net_device *dev;
6048 unsigned long event;
6049};
6050
6051static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6052{
6053 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6054 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6055 struct net_device *dev = inet6addr_work->dev;
6056 unsigned long event = inet6addr_work->event;
6057 struct mlxsw_sp *mlxsw_sp;
6058 struct mlxsw_sp_rif *rif;
6059
6060 rtnl_lock();
6061 mlxsw_sp = mlxsw_sp_lower_get(dev);
6062 if (!mlxsw_sp)
6063 goto out;
6064
6065 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6066 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6067 goto out;
6068
David Ahernf8fa9b42017-10-18 09:56:56 -07006069 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006070out:
6071 rtnl_unlock();
6072 dev_put(dev);
6073 kfree(inet6addr_work);
6074}
6075
6076/* Called with rcu_read_lock() */
6077int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6078 unsigned long event, void *ptr)
6079{
6080 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6081 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6082 struct net_device *dev = if6->idev->dev;
6083
David Ahern89d5dd22017-10-18 09:56:55 -07006084 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6085 if (event == NETDEV_UP)
6086 return NOTIFY_DONE;
6087
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006088 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6089 return NOTIFY_DONE;
6090
6091 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6092 if (!inet6addr_work)
6093 return NOTIFY_BAD;
6094
6095 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6096 inet6addr_work->dev = dev;
6097 inet6addr_work->event = event;
6098 dev_hold(dev);
6099 mlxsw_core_schedule_work(&inet6addr_work->work);
6100
6101 return NOTIFY_DONE;
6102}
6103
David Ahern89d5dd22017-10-18 09:56:55 -07006104int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6105 unsigned long event, void *ptr)
6106{
6107 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6108 struct net_device *dev = i6vi->i6vi_dev->dev;
6109 struct mlxsw_sp *mlxsw_sp;
6110 struct mlxsw_sp_rif *rif;
6111 int err = 0;
6112
6113 mlxsw_sp = mlxsw_sp_lower_get(dev);
6114 if (!mlxsw_sp)
6115 goto out;
6116
6117 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6118 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6119 goto out;
6120
David Ahernf8fa9b42017-10-18 09:56:56 -07006121 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006122out:
6123 return notifier_from_errno(err);
6124}
6125
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006126static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006127 const char *mac, int mtu)
6128{
6129 char ritr_pl[MLXSW_REG_RITR_LEN];
6130 int err;
6131
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006132 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006133 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6134 if (err)
6135 return err;
6136
6137 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6138 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6139 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6140 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6141}
6142
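/* React to MAC address / MTU changes on a netdev that has a RIF: remove the
 * old router FDB entry, rewrite the RITR register with the new MAC and MTU,
 * install a new FDB entry and update the multicast routing table's MTU.
 */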
6143int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6144{
6145 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006146 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006147 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006148 int err;
6149
6150 mlxsw_sp = mlxsw_sp_lower_get(dev);
6151 if (!mlxsw_sp)
6152 return 0;
6153
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006154 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6155 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006156 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006157 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006158
Ido Schimmela1107482017-05-26 08:37:39 +02006159 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006160 if (err)
6161 return err;
6162
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006163 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6164 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006165 if (err)
6166 goto err_rif_edit;
6167
Ido Schimmela1107482017-05-26 08:37:39 +02006168 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006169 if (err)
6170 goto err_rif_fdb_op;
6171
Yotam Gigifd890fe2017-09-27 08:23:21 +02006172 if (rif->mtu != dev->mtu) {
6173 struct mlxsw_sp_vr *vr;
6174
6175 /* The RIF is relevant only to its mr_table instance, as unlike
6176 * unicast routing, in multicast routing a RIF cannot be shared
6177 * between several multicast routing tables.
6178 */
6179 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6180 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6181 }
6182
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006183 ether_addr_copy(rif->addr, dev->dev_addr);
6184 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006185
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006186 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006187
6188 return 0;
6189
6190err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006191 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006192err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006193 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006194 return err;
6195}
6196
Ido Schimmelb1e45522017-04-30 19:47:14 +03006197static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006198 struct net_device *l3_dev,
6199 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006200{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006201 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006202
Ido Schimmelb1e45522017-04-30 19:47:14 +03006203 /* If netdev is already associated with a RIF, then we need to
6204 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006205 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006206 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6207 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006208 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006209
David Ahernf8fa9b42017-10-18 09:56:56 -07006210 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006211}
6212
Ido Schimmelb1e45522017-04-30 19:47:14 +03006213static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6214 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006215{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006216 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006217
Ido Schimmelb1e45522017-04-30 19:47:14 +03006218 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6219 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006220 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006221 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006222}
6223
Ido Schimmelb1e45522017-04-30 19:47:14 +03006224int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6225 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006226{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006227 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6228 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006229
Ido Schimmelb1e45522017-04-30 19:47:14 +03006230 if (!mlxsw_sp)
6231 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006232
Ido Schimmelb1e45522017-04-30 19:47:14 +03006233 switch (event) {
6234 case NETDEV_PRECHANGEUPPER:
6235 return 0;
6236 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006237 if (info->linking) {
6238 struct netlink_ext_ack *extack;
6239
6240 extack = netdev_notifier_info_to_extack(&info->info);
6241 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6242 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006243 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006244 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006245 break;
6246 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006247
Ido Schimmelb1e45522017-04-30 19:47:14 +03006248 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006249}
6250
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006251static struct mlxsw_sp_rif_subport *
6252mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006253{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006254 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006255}
6256
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006257static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6258 const struct mlxsw_sp_rif_params *params)
6259{
6260 struct mlxsw_sp_rif_subport *rif_subport;
6261
6262 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6263 rif_subport->vid = params->vid;
6264 rif_subport->lag = params->lag;
6265 if (params->lag)
6266 rif_subport->lag_id = params->lag_id;
6267 else
6268 rif_subport->system_port = params->system_port;
6269}
6270
6271static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6272{
6273 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6274 struct mlxsw_sp_rif_subport *rif_subport;
6275 char ritr_pl[MLXSW_REG_RITR_LEN];
6276
6277 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6278 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006279 rif->rif_index, rif->vr_id, rif->dev->mtu);
6280 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006281 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6282 rif_subport->lag ? rif_subport->lag_id :
6283 rif_subport->system_port,
6284 rif_subport->vid);
6285
6286 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6287}
6288
6289static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6290{
Petr Machata010cadf2017-09-02 23:49:18 +02006291 int err;
6292
6293 err = mlxsw_sp_rif_subport_op(rif, true);
6294 if (err)
6295 return err;
6296
6297 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6298 mlxsw_sp_fid_index(rif->fid), true);
6299 if (err)
6300 goto err_rif_fdb_op;
6301
6302 mlxsw_sp_fid_rif_set(rif->fid, rif);
6303 return 0;
6304
6305err_rif_fdb_op:
6306 mlxsw_sp_rif_subport_op(rif, false);
6307 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006308}
6309
6310static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6311{
Petr Machata010cadf2017-09-02 23:49:18 +02006312 struct mlxsw_sp_fid *fid = rif->fid;
6313
6314 mlxsw_sp_fid_rif_set(fid, NULL);
6315 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6316 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006317 mlxsw_sp_rif_subport_op(rif, false);
6318}
6319
6320static struct mlxsw_sp_fid *
6321mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6322{
6323 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6324}
6325
6326static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6327 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6328 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6329 .setup = mlxsw_sp_rif_subport_setup,
6330 .configure = mlxsw_sp_rif_subport_configure,
6331 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6332 .fid_get = mlxsw_sp_rif_subport_fid_get,
6333};
6334
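/* VLAN and FID RIFs are programmed through the same RITR helper below; the
 * interface type argument selects between a VLAN interface and a FID
 * interface.
 */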
6335static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6336 enum mlxsw_reg_ritr_if_type type,
6337 u16 vid_fid, bool enable)
6338{
6339 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6340 char ritr_pl[MLXSW_REG_RITR_LEN];
6341
6342 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006343 rif->dev->mtu);
6344 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006345 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6346
6347 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6348}
6349
Yotam Gigib35750f2017-10-09 11:15:33 +02006350u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006351{
6352 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6353}
6354
6355static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6356{
6357 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6358 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6359 int err;
6360
6361 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6362 if (err)
6363 return err;
6364
Ido Schimmel0d284812017-07-18 10:10:12 +02006365 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6366 mlxsw_sp_router_port(mlxsw_sp), true);
6367 if (err)
6368 goto err_fid_mc_flood_set;
6369
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006370 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6371 mlxsw_sp_router_port(mlxsw_sp), true);
6372 if (err)
6373 goto err_fid_bc_flood_set;
6374
Petr Machata010cadf2017-09-02 23:49:18 +02006375 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6376 mlxsw_sp_fid_index(rif->fid), true);
6377 if (err)
6378 goto err_rif_fdb_op;
6379
6380 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006381 return 0;
6382
Petr Machata010cadf2017-09-02 23:49:18 +02006383err_rif_fdb_op:
6384 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6385 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006386err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006387 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6388 mlxsw_sp_router_port(mlxsw_sp), false);
6389err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006390 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6391 return err;
6392}
6393
6394static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6395{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006396 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006397 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6398 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006399
Petr Machata010cadf2017-09-02 23:49:18 +02006400 mlxsw_sp_fid_rif_set(fid, NULL);
6401 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6402 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006403 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6404 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006405 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6406 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006407 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6408}
6409
6410static struct mlxsw_sp_fid *
6411mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6412{
6413 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6414
6415 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6416}
6417
6418static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6419 .type = MLXSW_SP_RIF_TYPE_VLAN,
6420 .rif_size = sizeof(struct mlxsw_sp_rif),
6421 .configure = mlxsw_sp_rif_vlan_configure,
6422 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6423 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6424};
6425
6426static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6427{
6428 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6429 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6430 int err;
6431
6432 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6433 true);
6434 if (err)
6435 return err;
6436
Ido Schimmel0d284812017-07-18 10:10:12 +02006437 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6438 mlxsw_sp_router_port(mlxsw_sp), true);
6439 if (err)
6440 goto err_fid_mc_flood_set;
6441
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006442 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6443 mlxsw_sp_router_port(mlxsw_sp), true);
6444 if (err)
6445 goto err_fid_bc_flood_set;
6446
Petr Machata010cadf2017-09-02 23:49:18 +02006447 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6448 mlxsw_sp_fid_index(rif->fid), true);
6449 if (err)
6450 goto err_rif_fdb_op;
6451
6452 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006453 return 0;
6454
Petr Machata010cadf2017-09-02 23:49:18 +02006455err_rif_fdb_op:
6456 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6457 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006458err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006459 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6460 mlxsw_sp_router_port(mlxsw_sp), false);
6461err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006462 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6463 return err;
6464}
6465
6466static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6467{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006468 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006469 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6470 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006471
Petr Machata010cadf2017-09-02 23:49:18 +02006472 mlxsw_sp_fid_rif_set(fid, NULL);
6473 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6474 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006475 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6476 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006477 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6478 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006479 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6480}
6481
6482static struct mlxsw_sp_fid *
6483mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6484{
6485 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6486}
6487
6488static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6489 .type = MLXSW_SP_RIF_TYPE_FID,
6490 .rif_size = sizeof(struct mlxsw_sp_rif),
6491 .configure = mlxsw_sp_rif_fid_configure,
6492 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6493 .fid_get = mlxsw_sp_rif_fid_fid_get,
6494};
6495
Petr Machata6ddb7422017-09-02 23:49:19 +02006496static struct mlxsw_sp_rif_ipip_lb *
6497mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6498{
6499 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6500}
6501
6502static void
6503mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6504 const struct mlxsw_sp_rif_params *params)
6505{
6506 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6507 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6508
6509 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6510 common);
6511 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6512 rif_lb->lb_config = params_lb->lb_config;
6513}
6514
6515static int
6516mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6517 struct mlxsw_sp_vr *ul_vr, bool enable)
6518{
6519 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6520 struct mlxsw_sp_rif *rif = &lb_rif->common;
6521 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6522 char ritr_pl[MLXSW_REG_RITR_LEN];
6523 u32 saddr4;
6524
6525 switch (lb_cf.ul_protocol) {
6526 case MLXSW_SP_L3_PROTO_IPV4:
6527 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6528 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6529 rif->rif_index, rif->vr_id, rif->dev->mtu);
6530 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6531 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6532 ul_vr->id, saddr4, lb_cf.okey);
6533 break;
6534
6535 case MLXSW_SP_L3_PROTO_IPV6:
6536 return -EAFNOSUPPORT;
6537 }
6538
6539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6540}
6541
6542static int
6543mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6544{
6545 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6546 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6547 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6548 struct mlxsw_sp_vr *ul_vr;
6549 int err;
6550
David Ahernf8fa9b42017-10-18 09:56:56 -07006551 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006552 if (IS_ERR(ul_vr))
6553 return PTR_ERR(ul_vr);
6554
6555 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6556 if (err)
6557 goto err_loopback_op;
6558
6559 lb_rif->ul_vr_id = ul_vr->id;
6560 ++ul_vr->rif_count;
6561 return 0;
6562
6563err_loopback_op:
6564 mlxsw_sp_vr_put(ul_vr);
6565 return err;
6566}
6567
6568static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6569{
6570 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6571 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6572 struct mlxsw_sp_vr *ul_vr;
6573
6574 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6575 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6576
6577 --ul_vr->rif_count;
6578 mlxsw_sp_vr_put(ul_vr);
6579}
6580
6581static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6582 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6583 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6584 .setup = mlxsw_sp_rif_ipip_lb_setup,
6585 .configure = mlxsw_sp_rif_ipip_lb_configure,
6586 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6587};
6588
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006589static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6590 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6591 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6592 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006593 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006594};
6595
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006596static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6597{
6598 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6599
6600 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6601 sizeof(struct mlxsw_sp_rif *),
6602 GFP_KERNEL);
6603 if (!mlxsw_sp->router->rifs)
6604 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006605
6606 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6607
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006608 return 0;
6609}
6610
6611static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6612{
6613 int i;
6614
6615 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6616 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6617
6618 kfree(mlxsw_sp->router->rifs);
6619}
6620
Petr Machatadcbda282017-10-20 09:16:16 +02006621static int
6622mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6623{
6624 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6625
6626 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6627 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6628}
6629
Petr Machata38ebc0f2017-09-02 23:49:17 +02006630static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6631{
6632 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006633 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006634 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006635}
6636
6637static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6638{
Petr Machata1012b9a2017-09-02 23:49:23 +02006639 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006640}
6641
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006642static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6643{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006644 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006645
6646 /* Flush pending FIB notifications and then flush the device's
6647 * table before requesting another dump. The FIB notification
6648 * block is unregistered, so no need to take RTNL.
6649 */
6650 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006651 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6652 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006653}
6654
Ido Schimmelaf658b62017-11-02 17:14:09 +01006655#ifdef CONFIG_IP_ROUTE_MULTIPATH
6656static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6657{
6658 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6659}
6660
6661static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6662{
6663 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6664}
6665
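/* Program the ECMP hash fields. For IPv4 the selection follows the kernel's
 * fib_multipath_hash_policy sysctl: source and destination addresses only
 * by default, with the protocol and TCP/UDP ports added when L4 hashing is
 * enabled. IPv6 always hashes on addresses, flow label and next header.
 */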
6666static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6667{
6668 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6669
6670 mlxsw_sp_mp_hash_header_set(recr2_pl,
6671 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6672 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6673 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6674 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6675 if (only_l3)
6676 return;
6677 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6678 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6679 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6680 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6681}
6682
6683static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6684{
6685 mlxsw_sp_mp_hash_header_set(recr2_pl,
6686 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6687 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6688 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6689 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6690 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6691 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6692}
6693
6694static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6695{
6696 char recr2_pl[MLXSW_REG_RECR2_LEN];
6697 u32 seed;
6698
6699 get_random_bytes(&seed, sizeof(seed));
6700 mlxsw_reg_recr2_pack(recr2_pl, seed);
6701 mlxsw_sp_mp4_hash_init(recr2_pl);
6702 mlxsw_sp_mp6_hash_init(recr2_pl);
6703
6704 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6705}
6706#else
6707static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6708{
6709 return 0;
6710}
6711#endif
6712
Ido Schimmel4724ba562017-03-10 08:53:39 +01006713static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6714{
6715 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6716 u64 max_rifs;
6717 int err;
6718
6719 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6720 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006721 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006722
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006723 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006724 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6725 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6726 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006727 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006728 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006729}
6730
6731static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6732{
6733 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006734
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006735 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006736 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006737}
6738
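/* Router initialization. The FIB notifier is registered last, once the RIF,
 * IP-in-IP, nexthop, LPM, multicast and neighbour infrastructure is ready,
 * so that the initial FIB dump only sees a fully initialized router. The
 * error path unwinds in the opposite order.
 */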
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006739int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6740{
Ido Schimmel9011b672017-05-16 19:38:25 +02006741 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006742 int err;
6743
Ido Schimmel9011b672017-05-16 19:38:25 +02006744 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6745 if (!router)
6746 return -ENOMEM;
6747 mlxsw_sp->router = router;
6748 router->mlxsw_sp = mlxsw_sp;
6749
6750 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006751 err = __mlxsw_sp_router_init(mlxsw_sp);
6752 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006753 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006754
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006755 err = mlxsw_sp_rifs_init(mlxsw_sp);
6756 if (err)
6757 goto err_rifs_init;
6758
Petr Machata38ebc0f2017-09-02 23:49:17 +02006759 err = mlxsw_sp_ipips_init(mlxsw_sp);
6760 if (err)
6761 goto err_ipips_init;
6762
Ido Schimmel9011b672017-05-16 19:38:25 +02006763 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006764 &mlxsw_sp_nexthop_ht_params);
6765 if (err)
6766 goto err_nexthop_ht_init;
6767
Ido Schimmel9011b672017-05-16 19:38:25 +02006768 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006769 &mlxsw_sp_nexthop_group_ht_params);
6770 if (err)
6771 goto err_nexthop_group_ht_init;
6772
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006773 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006774 err = mlxsw_sp_lpm_init(mlxsw_sp);
6775 if (err)
6776 goto err_lpm_init;
6777
Yotam Gigid42b0962017-09-27 08:23:20 +02006778 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6779 if (err)
6780 goto err_mr_init;
6781
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006782 err = mlxsw_sp_vrs_init(mlxsw_sp);
6783 if (err)
6784 goto err_vrs_init;
6785
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006786 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006787 if (err)
6788 goto err_neigh_init;
6789
Ido Schimmel48fac882017-11-02 17:14:06 +01006790 mlxsw_sp->router->netevent_nb.notifier_call =
6791 mlxsw_sp_router_netevent_event;
6792 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6793 if (err)
6794 goto err_register_netevent_notifier;
6795
Ido Schimmelaf658b62017-11-02 17:14:09 +01006796 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
6797 if (err)
6798 goto err_mp_hash_init;
6799
Ido Schimmel7e39d112017-05-16 19:38:28 +02006800 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6801 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006802 mlxsw_sp_router_fib_dump_flush);
6803 if (err)
6804 goto err_register_fib_notifier;
6805
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006806 return 0;
6807
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006808err_register_fib_notifier:
Ido Schimmelaf658b62017-11-02 17:14:09 +01006809err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01006810 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6811err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006812 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006813err_neigh_init:
6814 mlxsw_sp_vrs_fini(mlxsw_sp);
6815err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02006816 mlxsw_sp_mr_fini(mlxsw_sp);
6817err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01006818 mlxsw_sp_lpm_fini(mlxsw_sp);
6819err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006820 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006821err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006822 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006823err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02006824 mlxsw_sp_ipips_fini(mlxsw_sp);
6825err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006826 mlxsw_sp_rifs_fini(mlxsw_sp);
6827err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006828 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006829err_router_init:
6830 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006831 return err;
6832}
6833
6834void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6835{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006836 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01006837 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006838 mlxsw_sp_neigh_fini(mlxsw_sp);
6839 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02006840 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006841 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006842 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
6843 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006844 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006845 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006846 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006847 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006848}