/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval; /* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

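/* Router interface (RIF). Represents an L3 interface backed by a kernel
 * netdevice. Tracks the nexthops and neighbour entries that resolve through
 * it, the FID it is bound to and, optionally, ingress/egress packet
 * counters.
 */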
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

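/* Bind or unbind a counter to the RIF in the requested direction. RITR is
 * updated read-modify-write: the current RIF configuration is queried first
 * and written back with the counter fields changed.
 */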
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

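/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF. On any failure the counter is returned to the pool.
 */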
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

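/* Bitmap of prefix lengths in use by a FIB, one bit per possible length
 * (0..128). Used to find or build an LPM tree with a matching structure.
 */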
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

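/* Program the tree's bin structure (RALST) from the set of used prefix
 * lengths: the longest used length is packed as the root bin, and each
 * remaining used length is packed with a reference to the previously packed
 * one.
 */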
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

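/* A virtual router is considered in use as long as any of its tables
 * (IPv4/IPv6 unicast FIBs or the IPv4 multicast table) exists.
 */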
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

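/* Rebind every virtual router that currently uses this FIB's old LPM tree to
 * the new tree. On failure, walk back over the routers that were already
 * moved and rebind them to the old tree.
 */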
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

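/* IP-in-IP tunnel offload. The helpers below resolve a tunnel's underlay
 * device (via tun->parms.link) and the routing table its underlay lookups
 * use, falling back to the main table when no l3mdev is involved.
 */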
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static __be32
mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.saddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.daddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

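/* Create an IPIP entry for an offloadable tunnel netdevice and add it to the
 * router's ipip_list. Creation is refused with -EEXIST if another offloaded
 * tunnel already uses the same local address in the same underlay table.
 */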
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* The configuration where several tunnels have the same local address
	 * in the same underlay table needs special treatment in the HW. That is
	 * currently not implemented in the driver.
	 */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node) {
		ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry))
			return ERR_PTR(-EEXIST);
	}

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp,
			     const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_ipip_type ipipt;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV4) ||
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV6)) {
		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
							ol_dev);
		if (IS_ERR(ipip_entry))
			return PTR_ERR(ipip_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry && ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		return 0;

	/* When a tunneling device is moved to a different VRF, we need to
	 * update the backing loopback. Since RIFs can't be edited, we need to
	 * destroy and recreate it. That might create a window of opportunity
	 * where RALUE and RATR registers end up referencing a RIF that's
	 * already gone. RATRs are handled by the RIF destroy, and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt,
						 ol_dev);
	if (IS_ERR(lb_rif))
		return PTR_ERR(lb_rif);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	ipip_entry->ol_lb = lb_rif;

	if (ol_dev->flags & IFF_UP) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *ol_dev,
				  unsigned long event,
				  struct netdev_notifier_changeupper_info *info)
{
	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_up_event(mlxsw_sp, ol_dev);
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev))
			return mlxsw_sp_netdevice_ipip_vrf_event(mlxsw_sp,
								 ol_dev);
		return 0;
	}
	return 0;
}

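/* Neighbour (ARP/ND) tracking. Each offloaded kernel neighbour is mirrored
 * by an mlxsw_sp_neigh_entry, hashed by the struct neighbour pointer and
 * linked both to its RIF and to the nexthops that resolve through it.
 */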
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

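/* Iterate over the neighbours of a RIF: return the first entry when called
 * with NULL, the next entry otherwise, and NULL past the end of the list.
 */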
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
1588 return false;
1589 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001590
1591 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001592 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001593}
1594
1595static void
1596mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1597 struct mlxsw_sp_neigh_entry *neigh_entry)
1598{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001599 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001600 return;
1601
1602 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1603 return;
1604
1605 neigh_entry->counter_valid = true;
1606}
1607
1608static void
1609mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1610 struct mlxsw_sp_neigh_entry *neigh_entry)
1611{
1612 if (!neigh_entry->counter_valid)
1613 return;
1614 mlxsw_sp_flow_counter_free(mlxsw_sp,
1615 neigh_entry->counter_index);
1616 neigh_entry->counter_valid = false;
1617}
1618
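/* Create a neighbour entry for 'n': resolve the RIF from the neighbour's
 * netdev, insert the entry into the hash table, optionally allocate a
 * flow counter and link the entry on the RIF's neighbour list.
 */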
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001619static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001620mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001621{
1622 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001623 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001624 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001625
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001626 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1627 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001628 return ERR_PTR(-EINVAL);
1629
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001630 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001631 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001632 return ERR_PTR(-ENOMEM);
1633
1634 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1635 if (err)
1636 goto err_neigh_entry_insert;
1637
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001638 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001639 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001640
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001641 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001642
1643err_neigh_entry_insert:
1644 mlxsw_sp_neigh_entry_free(neigh_entry);
1645 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001646}
1647
1648static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001649mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1650 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001651{
Ido Schimmel9665b742017-02-08 11:16:42 +01001652 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001653 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001654 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1655 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001656}
1657
1658static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001659mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001660{
Jiri Pirko33b13412016-11-10 12:31:04 +01001661 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001662
Jiri Pirko33b13412016-11-10 12:31:04 +01001663 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001664 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001665 &key, mlxsw_sp_neigh_ht_params);
1666}
1667
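/* Derive the neighbour activity polling interval from the kernel's
 * DELAY_PROBE_TIME, taking the smaller of the ARP and (when IPv6 is
 * enabled) ND values. With the usual default of 5 seconds for both
 * tables this yields a 5000 ms interval.
 */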
Yotam Gigic723c7352016-07-05 11:27:43 +02001668static void
1669mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1670{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001671 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001672
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001673#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001674 interval = min_t(unsigned long,
1675 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1676 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001677#else
1678 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1679#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001680 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001681}
1682
1683static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1684 char *rauhtd_pl,
1685 int ent_index)
1686{
1687 struct net_device *dev;
1688 struct neighbour *n;
1689 __be32 dipn;
1690 u32 dip;
1691 u16 rif;
1692
1693 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1694
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001695 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001696 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1697 return;
1698 }
1699
1700 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001701 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001702 n = neigh_lookup(&arp_tbl, &dipn, dev);
1703 if (!n) {
1704 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1705 &dip);
1706 return;
1707 }
1708
1709 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1710 neigh_event_send(n, NULL);
1711 neigh_release(n);
1712}
1713
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001714#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001715static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1716 char *rauhtd_pl,
1717 int rec_index)
1718{
1719 struct net_device *dev;
1720 struct neighbour *n;
1721 struct in6_addr dip;
1722 u16 rif;
1723
1724 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1725 (char *) &dip);
1726
1727 if (!mlxsw_sp->router->rifs[rif]) {
1728 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1729 return;
1730 }
1731
1732 dev = mlxsw_sp->router->rifs[rif]->dev;
1733 n = neigh_lookup(&nd_tbl, &dip, dev);
1734 if (!n) {
1735 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1736 &dip);
1737 return;
1738 }
1739
1740 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1741 neigh_event_send(n, NULL);
1742 neigh_release(n);
1743}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001744#else
1745static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1746 char *rauhtd_pl,
1747 int rec_index)
1748{
1749}
1750#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001751
Yotam Gigic723c7352016-07-05 11:27:43 +02001752static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1753 char *rauhtd_pl,
1754 int rec_index)
1755{
1756 u8 num_entries;
1757 int i;
1758
1759 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1760 rec_index);
1761 /* Hardware starts counting at 0, so add 1. */
1762 num_entries++;
1763
1764 /* Each record consists of several neighbour entries. */
1765 for (i = 0; i < num_entries; i++) {
1766 int ent_index;
1767
1768 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1769 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1770 ent_index);
1771 }
1773}
1774
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001775static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1776 char *rauhtd_pl,
1777 int rec_index)
1778{
1779 /* One record contains one entry. */
1780 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1781 rec_index);
1782}
1783
Yotam Gigic723c7352016-07-05 11:27:43 +02001784static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1785 char *rauhtd_pl, int rec_index)
1786{
1787 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1788 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1789 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1790 rec_index);
1791 break;
1792 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001793 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1794 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001795 break;
1796 }
1797}
1798
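/* A RAUHTD dump is considered full when the maximum number of records was
 * returned and the last record itself is full: an IPv6 record always
 * carries a single entry, while an IPv4 record is only full when it holds
 * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries. A full dump triggers another
 * query for the remaining entries.
 */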
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001799static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1800{
1801 u8 num_rec, last_rec_index, num_entries;
1802
1803 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1804 last_rec_index = num_rec - 1;
1805
1806 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1807 return false;
1808 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1809 MLXSW_REG_RAUHTD_TYPE_IPV6)
1810 return true;
1811
1812 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1813 last_rec_index);
1814 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1815 return true;
1816 return false;
1817}
1818
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001819static int
1820__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1821 char *rauhtd_pl,
1822 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001823{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001824 int i, num_rec;
1825 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001826
1827 /* Make sure the neighbour's netdev isn't removed in the
1828 * process.
1829 */
1830 rtnl_lock();
1831 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001832 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001833 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1834 rauhtd_pl);
1835 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001836 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001837 break;
1838 }
1839 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1840 for (i = 0; i < num_rec; i++)
1841 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1842 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001843 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001844 rtnl_unlock();
1845
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001846 return err;
1847}
1848
1849static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1850{
1851 enum mlxsw_reg_rauhtd_type type;
1852 char *rauhtd_pl;
1853 int err;
1854
1855 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1856 if (!rauhtd_pl)
1857 return -ENOMEM;
1858
1859 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1860 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1861 if (err)
1862 goto out;
1863
1864 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1865 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1866out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001867 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001868 return err;
1869}
1870
1871static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1872{
1873 struct mlxsw_sp_neigh_entry *neigh_entry;
1874
1875	/* Take the RTNL mutex here to prevent the lists from changing. */
1876 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001877 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001878 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001879		/* If this neigh has nexthops, make the kernel think this neigh
1880 * is active regardless of the traffic.
1881 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001882 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001883 rtnl_unlock();
1884}
1885
1886static void
1887mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1888{
Ido Schimmel9011b672017-05-16 19:38:25 +02001889 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001890
Ido Schimmel9011b672017-05-16 19:38:25 +02001891 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001892 msecs_to_jiffies(interval));
1893}
1894
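/* Periodic work: dump the active neighbours from the device via RAUHTD so
 * the kernel keeps them alive, poke the neighbours that back nexthops and
 * re-arm the delayed work with the configured interval.
 */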
1895static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1896{
Ido Schimmel9011b672017-05-16 19:38:25 +02001897 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001898 int err;
1899
Ido Schimmel9011b672017-05-16 19:38:25 +02001900 router = container_of(work, struct mlxsw_sp_router,
1901 neighs_update.dw.work);
1902 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001903 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001904 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001905
Ido Schimmel9011b672017-05-16 19:38:25 +02001906 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001907
Ido Schimmel9011b672017-05-16 19:38:25 +02001908 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001909}
1910
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001911static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1912{
1913 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001914 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001915
Ido Schimmel9011b672017-05-16 19:38:25 +02001916 router = container_of(work, struct mlxsw_sp_router,
1917 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001918	/* Iterate over the nexthop neighbours and probe the unresolved ones.
1919	 * This solves a chicken-and-egg problem: a nexthop is not offloaded
1920	 * until its neighbour is resolved, but the neighbour may never get
1921	 * resolved if traffic is already flowing in hardware through a
1922	 * different nexthop.
1923	 *
1924	 * Take the RTNL mutex here to prevent the lists from changing.
1925	 */
1926 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001927 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001928 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001929 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001930 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001931 rtnl_unlock();
1932
Ido Schimmel9011b672017-05-16 19:38:25 +02001933 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001934 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1935}
1936
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001937static void
1938mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1939 struct mlxsw_sp_neigh_entry *neigh_entry,
1940 bool removing);
1941
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001942static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001943{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001944 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1945 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1946}
1947
1948static void
1949mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1950 struct mlxsw_sp_neigh_entry *neigh_entry,
1951 enum mlxsw_reg_rauht_op op)
1952{
Jiri Pirko33b13412016-11-10 12:31:04 +01001953 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001954 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001955 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001956
1957 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1958 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001959 if (neigh_entry->counter_valid)
1960 mlxsw_reg_rauht_pack_counter(rauht_pl,
1961 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001962 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1963}
1964
1965static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001966mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1967 struct mlxsw_sp_neigh_entry *neigh_entry,
1968 enum mlxsw_reg_rauht_op op)
1969{
1970 struct neighbour *n = neigh_entry->key.n;
1971 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1972 const char *dip = n->primary_key;
1973
1974 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1975 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001976 if (neigh_entry->counter_valid)
1977 mlxsw_reg_rauht_pack_counter(rauht_pl,
1978 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001979 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1980}
1981
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001982bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001983{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001984 struct neighbour *n = neigh_entry->key.n;
1985
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001986 /* Packets with a link-local destination address are trapped
1987 * after LPM lookup and never reach the neighbour table, so
1988 * there is no need to program such neighbours to the device.
1989 */
1990 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1991 IPV6_ADDR_LINKLOCAL)
1992 return true;
1993 return false;
1994}
1995
1996static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001997mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1998 struct mlxsw_sp_neigh_entry *neigh_entry,
1999 bool adding)
2000{
2001 if (!adding && !neigh_entry->connected)
2002 return;
2003 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002004 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002005 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2006 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002007 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002008 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002009 return;
2010 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2011 mlxsw_sp_rauht_op(adding));
2012 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002013 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002014 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002015}
2016
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002017void
2018mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2019 struct mlxsw_sp_neigh_entry *neigh_entry,
2020 bool adding)
2021{
2022 if (adding)
2023 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2024 else
2025 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2026 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2027}
2028
Ido Schimmelceb88812017-11-02 17:14:07 +01002029struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002030 struct work_struct work;
2031 struct mlxsw_sp *mlxsw_sp;
2032 struct neighbour *n;
2033};
2034
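/* Deferred handler for NETEVENT_NEIGH_UPDATE: snapshot the neighbour state
 * under its lock, then, under RTNL, create, update or destroy the
 * corresponding hardware entry and reflect the change on any nexthops that
 * use it.
 */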
2035static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2036{
Ido Schimmelceb88812017-11-02 17:14:07 +01002037 struct mlxsw_sp_netevent_work *net_work =
2038 container_of(work, struct mlxsw_sp_netevent_work, work);
2039 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002040 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002041 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002042 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002043 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002044 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002045
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002046 /* If these parameters are changed after we release the lock,
2047 * then we are guaranteed to receive another event letting us
2048 * know about it.
2049 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002050 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002051 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002052 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002053 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002054 read_unlock_bh(&n->lock);
2055
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002056 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002057 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002058 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2059 if (!entry_connected && !neigh_entry)
2060 goto out;
2061 if (!neigh_entry) {
2062 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2063 if (IS_ERR(neigh_entry))
2064 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002065 }
2066
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002067 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2068 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2069 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2070
2071 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2072 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2073
2074out:
2075 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002076 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002077 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002078}
2079
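/* Netevent notifier. It runs in atomic context, so DELAY_PROBE_TIME
 * changes are applied to the polling interval directly, while neighbour
 * updates are referenced and deferred to process context via a work item.
 */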
Ido Schimmel48fac882017-11-02 17:14:06 +01002080static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
2081 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002082{
Ido Schimmelceb88812017-11-02 17:14:07 +01002083 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002084 struct mlxsw_sp_port *mlxsw_sp_port;
2085 struct mlxsw_sp *mlxsw_sp;
2086 unsigned long interval;
2087 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002088 struct neighbour *n;
Yotam Gigic723c7352016-07-05 11:27:43 +02002089
2090 switch (event) {
2091 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2092 p = ptr;
2093
2094 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002095 if (!p->dev || (p->tbl->family != AF_INET &&
2096 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002097 return NOTIFY_DONE;
2098
2099 /* We are in atomic context and can't take RTNL mutex,
2100 * so use RCU variant to walk the device chain.
2101 */
2102 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2103 if (!mlxsw_sp_port)
2104 return NOTIFY_DONE;
2105
2106 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2107 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002108 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002109
2110 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2111 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002112 case NETEVENT_NEIGH_UPDATE:
2113 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002114
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002115 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002116 return NOTIFY_DONE;
2117
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002118 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002119 if (!mlxsw_sp_port)
2120 return NOTIFY_DONE;
2121
Ido Schimmelceb88812017-11-02 17:14:07 +01002122 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2123 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002124 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002125 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002126 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002127
Ido Schimmelceb88812017-11-02 17:14:07 +01002128 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2129 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2130 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002131
2132 /* Take a reference to ensure the neighbour won't be
2133		 * destroyed until we drop the reference in the
2134		 * delayed work.
2135 */
2136 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002137 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002138 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002139 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002140 }
2141
2142 return NOTIFY_DONE;
2143}
2144
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002145static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2146{
Yotam Gigic723c7352016-07-05 11:27:43 +02002147 int err;
2148
Ido Schimmel9011b672017-05-16 19:38:25 +02002149 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002150 &mlxsw_sp_neigh_ht_params);
2151 if (err)
2152 return err;
2153
2154 /* Initialize the polling interval according to the default
2155	 * neighbour tables.
2156 */
2157 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2158
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002159	/* Create the delayed works for the activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002160 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002161 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002162 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002163 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002164 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2165 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002166 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002167}
2168
2169static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2170{
Ido Schimmel9011b672017-05-16 19:38:25 +02002171 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2172 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2173 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002174}
2175
Ido Schimmel9665b742017-02-08 11:16:42 +01002176static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002177 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002178{
2179 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2180
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002181 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002182 rif_list_node) {
2183 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002184 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002185 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002186}
2187
Petr Machata35225e42017-09-02 23:49:22 +02002188enum mlxsw_sp_nexthop_type {
2189 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002190 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002191};
2192
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002193struct mlxsw_sp_nexthop_key {
2194 struct fib_nh *fib_nh;
2195};
2196
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002197struct mlxsw_sp_nexthop {
2198 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002199 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002200 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002201 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2202 * this belongs to
2203 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002204 struct rhash_head ht_node;
2205 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002206 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002207 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002208 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002209 int norm_nh_weight;
2210 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002211 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002212 u8 should_offload:1, /* set indicates this neigh is connected and
2213 * should be put to KVD linear area of this group.
2214 */
2215 offloaded:1, /* set in case the neigh is actually put into
2216 * KVD linear area of this group.
2217 */
2218 update:1; /* set indicates that MAC of this neigh should be
2219 * updated in HW
2220 */
Petr Machata35225e42017-09-02 23:49:22 +02002221 enum mlxsw_sp_nexthop_type type;
2222 union {
2223 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002224 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002225 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002226 unsigned int counter_index;
2227 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002228};
2229
2230struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002231 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002232 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002233 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002234 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002235 u8 adj_index_valid:1,
2236 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002237 u32 adj_index;
2238 u16 ecmp_size;
2239 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002240 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002241 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002242#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002243};
2244
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002245void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2246 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002247{
2248 struct devlink *devlink;
2249
2250 devlink = priv_to_devlink(mlxsw_sp->core);
2251 if (!devlink_dpipe_table_counter_enabled(devlink,
2252 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2253 return;
2254
2255 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2256 return;
2257
2258 nh->counter_valid = true;
2259}
2260
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002261void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2262 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002263{
2264 if (!nh->counter_valid)
2265 return;
2266 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2267 nh->counter_valid = false;
2268}
2269
2270int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2271 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2272{
2273 if (!nh->counter_valid)
2274 return -EINVAL;
2275
2276 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2277 p_counter, NULL);
2278}
2279
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002280struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2281 struct mlxsw_sp_nexthop *nh)
2282{
2283 if (!nh) {
2284 if (list_empty(&router->nexthop_list))
2285 return NULL;
2286 else
2287 return list_first_entry(&router->nexthop_list,
2288 typeof(*nh), router_list_node);
2289 }
2290 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2291 return NULL;
2292 return list_next_entry(nh, router_list_node);
2293}
2294
2295bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2296{
2297 return nh->offloaded;
2298}
2299
2300unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2301{
2302 if (!nh->offloaded)
2303 return NULL;
2304 return nh->neigh_entry->ha;
2305}
2306
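/* Report where a nexthop lives within its group's adjacency block: the
 * group's base adjacency index, the group (ECMP) size and the offset of
 * this nexthop. The offset is the sum of the adjacency entries used by the
 * offloaded nexthops preceding it; e.g. if the first two nexthops consume
 * two and three entries, the third one starts at offset five.
 */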
2307int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002308 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002309{
2310 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2311 u32 adj_hash_index = 0;
2312 int i;
2313
2314 if (!nh->offloaded || !nh_grp->adj_index_valid)
2315 return -EINVAL;
2316
2317 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002318 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002319
2320 for (i = 0; i < nh_grp->count; i++) {
2321 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2322
2323 if (nh_iter == nh)
2324 break;
2325 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002326 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002327 }
2328
2329 *p_adj_hash_index = adj_hash_index;
2330 return 0;
2331}
2332
2333struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2334{
2335 return nh->rif;
2336}
2337
2338bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2339{
2340 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2341 int i;
2342
2343 for (i = 0; i < nh_grp->count; i++) {
2344 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2345
2346 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2347 return true;
2348 }
2349 return false;
2350}
2351
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002352static struct fib_info *
2353mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2354{
2355 return nh_grp->priv;
2356}
2357
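/* Nexthop groups are kept in a single rhashtable. IPv4 groups are keyed by
 * their fib_info pointer; IPv6 groups are compared by the (gateway,
 * ifindex) pairs of their nexthops and hashed over the nexthop ifindexes,
 * which lets equivalent IPv6 routes share one group.
 */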
2358struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002359 enum mlxsw_sp_l3proto proto;
2360 union {
2361 struct fib_info *fi;
2362 struct mlxsw_sp_fib6_entry *fib6_entry;
2363 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002364};
2365
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002366static bool
2367mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2368 const struct in6_addr *gw, int ifindex)
2369{
2370 int i;
2371
2372 for (i = 0; i < nh_grp->count; i++) {
2373 const struct mlxsw_sp_nexthop *nh;
2374
2375 nh = &nh_grp->nexthops[i];
2376 if (nh->ifindex == ifindex &&
2377 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2378 return true;
2379 }
2380
2381 return false;
2382}
2383
2384static bool
2385mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2386 const struct mlxsw_sp_fib6_entry *fib6_entry)
2387{
2388 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2389
2390 if (nh_grp->count != fib6_entry->nrt6)
2391 return false;
2392
2393 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2394 struct in6_addr *gw;
2395 int ifindex;
2396
2397 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2398 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2399 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2400 return false;
2401 }
2402
2403 return true;
2404}
2405
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002406static int
2407mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2408{
2409 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2410 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2411
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002412 switch (cmp_arg->proto) {
2413 case MLXSW_SP_L3_PROTO_IPV4:
2414 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2415 case MLXSW_SP_L3_PROTO_IPV6:
2416 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2417 cmp_arg->fib6_entry);
2418 default:
2419 WARN_ON(1);
2420 return 1;
2421 }
2422}
2423
2424static int
2425mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2426{
2427 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002428}
2429
2430static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2431{
2432 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002433 const struct mlxsw_sp_nexthop *nh;
2434 struct fib_info *fi;
2435 unsigned int val;
2436 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002437
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002438 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2439 case AF_INET:
2440 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2441 return jhash(&fi, sizeof(fi), seed);
2442 case AF_INET6:
2443 val = nh_grp->count;
2444 for (i = 0; i < nh_grp->count; i++) {
2445 nh = &nh_grp->nexthops[i];
2446 val ^= nh->ifindex;
2447 }
2448 return jhash(&val, sizeof(val), seed);
2449 default:
2450 WARN_ON(1);
2451 return 0;
2452 }
2453}
2454
2455static u32
2456mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2457{
2458 unsigned int val = fib6_entry->nrt6;
2459 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2460 struct net_device *dev;
2461
2462 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2463 dev = mlxsw_sp_rt6->rt->dst.dev;
2464 val ^= dev->ifindex;
2465 }
2466
2467 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002468}
2469
2470static u32
2471mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2472{
2473 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2474
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002475 switch (cmp_arg->proto) {
2476 case MLXSW_SP_L3_PROTO_IPV4:
2477 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2478 case MLXSW_SP_L3_PROTO_IPV6:
2479 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2480 default:
2481 WARN_ON(1);
2482 return 0;
2483 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002484}
2485
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002486static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002487 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002488 .hashfn = mlxsw_sp_nexthop_group_hash,
2489 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2490 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002491};
2492
2493static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2494 struct mlxsw_sp_nexthop_group *nh_grp)
2495{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002496 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2497 !nh_grp->gateway)
2498 return 0;
2499
Ido Schimmel9011b672017-05-16 19:38:25 +02002500 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002501 &nh_grp->ht_node,
2502 mlxsw_sp_nexthop_group_ht_params);
2503}
2504
2505static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2506 struct mlxsw_sp_nexthop_group *nh_grp)
2507{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002508 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2509 !nh_grp->gateway)
2510 return;
2511
Ido Schimmel9011b672017-05-16 19:38:25 +02002512 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002513 &nh_grp->ht_node,
2514 mlxsw_sp_nexthop_group_ht_params);
2515}
2516
2517static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002518mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2519 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002520{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002521 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2522
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002523 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002524 cmp_arg.fi = fi;
2525 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2526 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002527 mlxsw_sp_nexthop_group_ht_params);
2528}
2529
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002530static struct mlxsw_sp_nexthop_group *
2531mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2532 struct mlxsw_sp_fib6_entry *fib6_entry)
2533{
2534 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2535
2536 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2537 cmp_arg.fib6_entry = fib6_entry;
2538 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2539 &cmp_arg,
2540 mlxsw_sp_nexthop_group_ht_params);
2541}
2542
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002543static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2544 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2545 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2546 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2547};
2548
2549static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2550 struct mlxsw_sp_nexthop *nh)
2551{
Ido Schimmel9011b672017-05-16 19:38:25 +02002552 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002553 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2554}
2555
2556static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2557 struct mlxsw_sp_nexthop *nh)
2558{
Ido Schimmel9011b672017-05-16 19:38:25 +02002559 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002560 mlxsw_sp_nexthop_ht_params);
2561}
2562
Ido Schimmelad178c82017-02-08 11:16:40 +01002563static struct mlxsw_sp_nexthop *
2564mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2565 struct mlxsw_sp_nexthop_key key)
2566{
Ido Schimmel9011b672017-05-16 19:38:25 +02002567 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002568 mlxsw_sp_nexthop_ht_params);
2569}
2570
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002571static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002572 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002573 u32 adj_index, u16 ecmp_size,
2574 u32 new_adj_index,
2575 u16 new_ecmp_size)
2576{
2577 char raleu_pl[MLXSW_REG_RALEU_LEN];
2578
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002579 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002580 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2581 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002582 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002583 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2584}
2585
2586static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2587 struct mlxsw_sp_nexthop_group *nh_grp,
2588 u32 old_adj_index, u16 old_ecmp_size)
2589{
2590 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002591 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002592 int err;
2593
2594 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002595 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002596 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002597 fib = fib_entry->fib_node->fib;
2598 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002599 old_adj_index,
2600 old_ecmp_size,
2601 nh_grp->adj_index,
2602 nh_grp->ecmp_size);
2603 if (err)
2604 return err;
2605 }
2606 return 0;
2607}
2608
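/* Program a single RATR (adjacency) entry for an Ethernet nexthop: point
 * it at the neighbour's RIF and MAC and, when a flow counter was
 * allocated, bind the counter to the entry.
 */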
Ido Schimmeleb789982017-10-22 23:11:48 +02002609static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2610 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002611{
2612 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2613 char ratr_pl[MLXSW_REG_RATR_LEN];
2614
2615 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002616 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2617 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002618 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002619 if (nh->counter_valid)
2620 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2621 else
2622 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2623
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002624 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2625}
2626
Ido Schimmeleb789982017-10-22 23:11:48 +02002627int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2628 struct mlxsw_sp_nexthop *nh)
2629{
2630 int i;
2631
2632 for (i = 0; i < nh->num_adj_entries; i++) {
2633 int err;
2634
2635 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2636 if (err)
2637 return err;
2638 }
2639
2640 return 0;
2641}
2642
2643static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2644 u32 adj_index,
2645 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002646{
2647 const struct mlxsw_sp_ipip_ops *ipip_ops;
2648
2649 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2650 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2651}
2652
Ido Schimmeleb789982017-10-22 23:11:48 +02002653static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2654 u32 adj_index,
2655 struct mlxsw_sp_nexthop *nh)
2656{
2657 int i;
2658
2659 for (i = 0; i < nh->num_adj_entries; i++) {
2660 int err;
2661
2662 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2663 nh);
2664 if (err)
2665 return err;
2666 }
2667
2668 return 0;
2669}
2670
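/* Write the offloaded nexthops of a group into its adjacency entries. With
 * 'reallocate' set (a fresh adjacency block was allocated) every offloaded
 * nexthop is rewritten; otherwise only those marked for update.
 */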
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002671static int
Petr Machata35225e42017-09-02 23:49:22 +02002672mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2673 struct mlxsw_sp_nexthop_group *nh_grp,
2674 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002675{
2676 u32 adj_index = nh_grp->adj_index; /* base */
2677 struct mlxsw_sp_nexthop *nh;
2678 int i;
2679 int err;
2680
2681 for (i = 0; i < nh_grp->count; i++) {
2682 nh = &nh_grp->nexthops[i];
2683
2684 if (!nh->should_offload) {
2685 nh->offloaded = 0;
2686 continue;
2687 }
2688
Ido Schimmela59b7e02017-01-23 11:11:42 +01002689 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002690 switch (nh->type) {
2691 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002692 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002693 (mlxsw_sp, adj_index, nh);
2694 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002695 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2696 err = mlxsw_sp_nexthop_ipip_update
2697 (mlxsw_sp, adj_index, nh);
2698 break;
Petr Machata35225e42017-09-02 23:49:22 +02002699 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002700 if (err)
2701 return err;
2702 nh->update = 0;
2703 nh->offloaded = 1;
2704 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002705 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002706 }
2707 return 0;
2708}
2709
Ido Schimmel1819ae32017-07-21 18:04:28 +02002710static bool
2711mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2712 const struct mlxsw_sp_fib_entry *fib_entry);
2713
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002714static int
2715mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2716 struct mlxsw_sp_nexthop_group *nh_grp)
2717{
2718 struct mlxsw_sp_fib_entry *fib_entry;
2719 int err;
2720
2721 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002722 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2723 fib_entry))
2724 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002725 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2726 if (err)
2727 return err;
2728 }
2729 return 0;
2730}
2731
2732static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002733mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2734 enum mlxsw_reg_ralue_op op, int err);
2735
2736static void
2737mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2738{
2739 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2740 struct mlxsw_sp_fib_entry *fib_entry;
2741
2742 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2743 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2744 fib_entry))
2745 continue;
2746 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2747 }
2748}
2749
Ido Schimmel425a08c2017-10-22 23:11:47 +02002750static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2751{
2752 /* Valid sizes for an adjacency group are:
2753 * 1-64, 512, 1024, 2048 and 4096.
2754 */
2755 if (*p_adj_grp_size <= 64)
2756 return;
2757 else if (*p_adj_grp_size <= 512)
2758 *p_adj_grp_size = 512;
2759 else if (*p_adj_grp_size <= 1024)
2760 *p_adj_grp_size = 1024;
2761 else if (*p_adj_grp_size <= 2048)
2762 *p_adj_grp_size = 2048;
2763 else
2764 *p_adj_grp_size = 4096;
2765}
2766
2767static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2768 unsigned int alloc_size)
2769{
2770 if (alloc_size >= 4096)
2771 *p_adj_grp_size = 4096;
2772 else if (alloc_size >= 2048)
2773 *p_adj_grp_size = 2048;
2774 else if (alloc_size >= 1024)
2775 *p_adj_grp_size = 1024;
2776 else if (alloc_size >= 512)
2777 *p_adj_grp_size = 512;
2778}
2779
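/* Example: a group that needs 120 adjacency entries is first rounded up to
 * the next supported size, 512. Should the KVD linear allocator report
 * that the allocation would actually provide, say, 1024 entries, the group
 * size is then bumped to the largest supported size that still fits, 1024.
 */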
2780static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2781 u16 *p_adj_grp_size)
2782{
2783 unsigned int alloc_size;
2784 int err;
2785
2786 /* Round up the requested group size to the next size supported
2787 * by the device and make sure the request can be satisfied.
2788 */
2789 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2790 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2791 &alloc_size);
2792 if (err)
2793 return err;
2794 /* It is possible the allocation results in more allocated
2795	 * entries than requested. Try to use as many of them as
2796 * possible.
2797 */
2798 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2799
2800 return 0;
2801}
2802
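/* Weight normalization: divide each offloaded nexthop's weight by the GCD
 * of all offloaded weights and record the resulting sum. For example,
 * weights of 2 and 4 normalize to 1 and 2 with sum_norm_weight = 3;
 * mlxsw_sp_nexthop_group_rebalance() below then splits the group's
 * adjacency entries between the two nexthops in a 1:2 ratio.
 */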
Ido Schimmel77d964e2017-08-02 09:56:05 +02002803static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002804mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2805{
2806 int i, g = 0, sum_norm_weight = 0;
2807 struct mlxsw_sp_nexthop *nh;
2808
2809 for (i = 0; i < nh_grp->count; i++) {
2810 nh = &nh_grp->nexthops[i];
2811
2812 if (!nh->should_offload)
2813 continue;
2814 if (g > 0)
2815 g = gcd(nh->nh_weight, g);
2816 else
2817 g = nh->nh_weight;
2818 }
2819
2820 for (i = 0; i < nh_grp->count; i++) {
2821 nh = &nh_grp->nexthops[i];
2822
2823 if (!nh->should_offload)
2824 continue;
2825 nh->norm_nh_weight = nh->nh_weight / g;
2826 sum_norm_weight += nh->norm_nh_weight;
2827 }
2828
2829 nh_grp->sum_norm_weight = sum_norm_weight;
2830}
2831
2832static void
2833mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2834{
2835 int total = nh_grp->sum_norm_weight;
2836 u16 ecmp_size = nh_grp->ecmp_size;
2837 int i, weight = 0, lower_bound = 0;
2838
2839 for (i = 0; i < nh_grp->count; i++) {
2840 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2841 int upper_bound;
2842
2843 if (!nh->should_offload)
2844 continue;
2845 weight += nh->norm_nh_weight;
2846 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2847 nh->num_adj_entries = upper_bound - lower_bound;
2848 lower_bound = upper_bound;
2849 }
2850}
2851
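/* Rebuild a group's adjacency entries after a nexthop change: normalize
 * the weights, allocate a new KVD linear block sized accordingly, program
 * the nexthops, re-point the FIB entries and release the old block. If any
 * step fails, fall back to trapping the routes to the CPU.
 */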
2852static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002853mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2854 struct mlxsw_sp_nexthop_group *nh_grp)
2855{
Ido Schimmeleb789982017-10-22 23:11:48 +02002856 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002857 struct mlxsw_sp_nexthop *nh;
2858 bool offload_change = false;
2859 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002860 bool old_adj_index_valid;
2861 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002862 int i;
2863 int err;
2864
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002865 if (!nh_grp->gateway) {
2866 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2867 return;
2868 }
2869
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002870 for (i = 0; i < nh_grp->count; i++) {
2871 nh = &nh_grp->nexthops[i];
2872
Petr Machata56b8a9e2017-07-31 09:27:29 +02002873 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002874 offload_change = true;
2875 if (nh->should_offload)
2876 nh->update = 1;
2877 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002878 }
2879 if (!offload_change) {
2880 /* Nothing was added or removed, so no need to reallocate. Just
2881		 * update the MACs on the existing adjacency entries.
2882 */
Petr Machata35225e42017-09-02 23:49:22 +02002883 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002884 if (err) {
2885 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2886 goto set_trap;
2887 }
2888 return;
2889 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002890 mlxsw_sp_nexthop_group_normalize(nh_grp);
2891 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002892		/* No neigh of this group is connected, so we just set
2893		 * the trap and let everything flow through the kernel.
2894 */
2895 goto set_trap;
2896
Ido Schimmeleb789982017-10-22 23:11:48 +02002897 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002898 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2899 if (err)
2900 /* No valid allocation size available. */
2901 goto set_trap;
2902
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002903 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2904 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002905 /* We ran out of KVD linear space, just set the
2906 * trap and let everything flow through kernel.
2907 */
2908 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2909 goto set_trap;
2910 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002911 old_adj_index_valid = nh_grp->adj_index_valid;
2912 old_adj_index = nh_grp->adj_index;
2913 old_ecmp_size = nh_grp->ecmp_size;
2914 nh_grp->adj_index_valid = 1;
2915 nh_grp->adj_index = adj_index;
2916 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002917 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002918 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002919 if (err) {
2920 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2921 goto set_trap;
2922 }
2923
2924 if (!old_adj_index_valid) {
2925 /* The trap was set for fib entries, so we have to call
2926 * fib entry update to unset it and use adjacency index.
2927 */
2928 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2929 if (err) {
2930 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2931 goto set_trap;
2932 }
2933 return;
2934 }
2935
2936 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2937 old_adj_index, old_ecmp_size);
2938 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2939 if (err) {
2940 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2941 goto set_trap;
2942 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002943
2944 /* Offload state within the group changed, so update the flags. */
2945 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2946
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002947 return;
2948
2949set_trap:
2950 old_adj_index_valid = nh_grp->adj_index_valid;
2951 nh_grp->adj_index_valid = 0;
2952 for (i = 0; i < nh_grp->count; i++) {
2953 nh = &nh_grp->nexthops[i];
2954 nh->offloaded = 0;
2955 }
2956 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2957 if (err)
2958 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2959 if (old_adj_index_valid)
2960 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2961}
2962
2963static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2964 bool removing)
2965{
Petr Machata213666a2017-07-31 09:27:30 +02002966 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002967 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002968 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002969 nh->should_offload = 0;
2970 nh->update = 1;
2971}
2972
2973static void
2974mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2975 struct mlxsw_sp_neigh_entry *neigh_entry,
2976 bool removing)
2977{
2978 struct mlxsw_sp_nexthop *nh;
2979
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002980 list_for_each_entry(nh, &neigh_entry->nexthop_list,
2981 neigh_list_node) {
2982 __mlxsw_sp_nexthop_neigh_update(nh, removing);
2983 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2984 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002985}
2986
Ido Schimmel9665b742017-02-08 11:16:42 +01002987static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002988 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002989{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002990 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002991 return;
2992
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002993 nh->rif = rif;
2994 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01002995}
2996
2997static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
2998{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002999 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003000 return;
3001
3002 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003003 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003004}
3005
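/* Resolve the neighbour used by an Ethernet nexthop: look up (or
 * create) the kernel neighbour for the nexthop's gateway address on the
 * RIF's netdev, link the nexthop to the matching neigh_entry and mark
 * the nexthop as offloadable only if the neighbour is valid and not
 * dead.
 */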
Ido Schimmela8c97012017-02-08 11:16:35 +01003006static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3007 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003008{
3009 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003010 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003011 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003012 int err;
3013
Ido Schimmelad178c82017-02-08 11:16:40 +01003014 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003015 return 0;
3016
Jiri Pirko33b13412016-11-10 12:31:04 +01003017	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003018	 * not destroyed before the nexthop entry is finished with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003019	 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003020	 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003021 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003022 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003023 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003024 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3025 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003026 if (IS_ERR(n))
3027 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003028 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003029 }
3030 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3031 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003032 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3033 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003034 err = -EINVAL;
3035 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003036 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003037 }
Yotam Gigib2157142016-07-05 11:27:51 +02003038
3039	/* If this is the first nexthop connected to that neigh, add it to
3040	 * the nexthop_neighs_list.
3041 */
3042 if (list_empty(&neigh_entry->nexthop_list))
3043 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003044 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003045
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003046 nh->neigh_entry = neigh_entry;
3047 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3048 read_lock_bh(&n->lock);
3049 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003050 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003051 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003052 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003053
3054 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003055
3056err_neigh_entry_create:
3057 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003058 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003059}
3060
Ido Schimmela8c97012017-02-08 11:16:35 +01003061static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3062 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003063{
3064 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003065 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003066
Ido Schimmelb8399a12017-02-08 11:16:33 +01003067 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003068 return;
3069 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003070
Ido Schimmel58312122016-12-23 09:32:50 +01003071 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003072 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003073 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003074
3075	/* If this is the last nexthop connected to that neigh, remove it
3076	 * from the nexthop_neighs_list.
3077 */
Ido Schimmele58be792017-02-08 11:16:28 +01003078 if (list_empty(&neigh_entry->nexthop_list))
3079 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003080
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003081 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3082 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3083
3084 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003085}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003086
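/* Initialize an IP-in-IP nexthop: find the IPIP entry that was created
 * for the tunnel's overlay device and mark the nexthop as offloadable.
 * Unlike Ethernet nexthops, no neighbour resolution takes place here.
 */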
Petr Machata1012b9a2017-09-02 23:49:23 +02003087static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003088 struct mlxsw_sp_nexthop *nh,
3089 struct net_device *ol_dev)
3090{
3091 if (!nh->nh_grp->gateway || nh->ipip_entry)
3092 return 0;
3093
Petr Machata4cccb732017-10-16 16:26:39 +02003094 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3095 if (!nh->ipip_entry)
3096 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003097
3098 __mlxsw_sp_nexthop_neigh_update(nh, false);
3099 return 0;
3100}
3101
3102static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3103 struct mlxsw_sp_nexthop *nh)
3104{
3105 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3106
3107 if (!ipip_entry)
3108 return;
3109
3110 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003111 nh->ipip_entry = NULL;
3112}
3113
3114static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3115 const struct fib_nh *fib_nh,
3116 enum mlxsw_sp_ipip_type *p_ipipt)
3117{
3118 struct net_device *dev = fib_nh->nh_dev;
3119
3120 return dev &&
3121 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3122 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3123}
3124
Petr Machata35225e42017-09-02 23:49:22 +02003125static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3126 struct mlxsw_sp_nexthop *nh)
3127{
3128 switch (nh->type) {
3129 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3130 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3131 mlxsw_sp_nexthop_rif_fini(nh);
3132 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003133 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003134 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003135 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3136 break;
Petr Machata35225e42017-09-02 23:49:22 +02003137 }
3138}
3139
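/* Choose the nexthop type for an IPv4 fib_nh: if the nexthop egresses
 * an IP-in-IP device that the driver can offload, set up an IPIP
 * nexthop and bind it to the tunnel's loopback RIF; otherwise treat it
 * as an Ethernet nexthop, bind it to the RIF of the egress device and
 * resolve its neighbour.
 */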
3140static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3141 struct mlxsw_sp_nexthop *nh,
3142 struct fib_nh *fib_nh)
3143{
Petr Machata1012b9a2017-09-02 23:49:23 +02003144 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003145 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003146 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003147 struct mlxsw_sp_rif *rif;
3148 int err;
3149
Petr Machata1012b9a2017-09-02 23:49:23 +02003150 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3151 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3152 MLXSW_SP_L3_PROTO_IPV4)) {
3153 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003154 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003155 if (err)
3156 return err;
3157 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3158 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003159 }
3160
Petr Machata35225e42017-09-02 23:49:22 +02003161 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3162 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3163 if (!rif)
3164 return 0;
3165
3166 mlxsw_sp_nexthop_rif_init(nh, rif);
3167 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3168 if (err)
3169 goto err_neigh_init;
3170
3171 return 0;
3172
3173err_neigh_init:
3174 mlxsw_sp_nexthop_rif_fini(nh);
3175 return err;
3176}
3177
3178static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3179 struct mlxsw_sp_nexthop *nh)
3180{
3181 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3182}
3183
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003184static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3185 struct mlxsw_sp_nexthop_group *nh_grp,
3186 struct mlxsw_sp_nexthop *nh,
3187 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003188{
3189 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003190 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003191 int err;
3192
3193 nh->nh_grp = nh_grp;
3194 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003195#ifdef CONFIG_IP_ROUTE_MULTIPATH
3196 nh->nh_weight = fib_nh->nh_weight;
3197#else
3198 nh->nh_weight = 1;
3199#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003200 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003201 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3202 if (err)
3203 return err;
3204
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003205 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003206 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3207
Ido Schimmel97989ee2017-03-10 08:53:38 +01003208 if (!dev)
3209 return 0;
3210
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003211 in_dev = __in_dev_get_rtnl(dev);
3212 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3213 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3214 return 0;
3215
Petr Machata35225e42017-09-02 23:49:22 +02003216 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003217 if (err)
3218 goto err_nexthop_neigh_init;
3219
3220 return 0;
3221
3222err_nexthop_neigh_init:
3223 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3224 return err;
3225}
3226
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003227static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3228 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003229{
Petr Machata35225e42017-09-02 23:49:22 +02003230 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003231 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003232 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003233 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003234}
3235
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003236static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3237 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003238{
3239 struct mlxsw_sp_nexthop_key key;
3240 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003241
Ido Schimmel9011b672017-05-16 19:38:25 +02003242 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003243 return;
3244
3245 key.fib_nh = fib_nh;
3246 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3247 if (WARN_ON_ONCE(!nh))
3248 return;
3249
Ido Schimmelad178c82017-02-08 11:16:40 +01003250 switch (event) {
3251 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003252 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003253 break;
3254 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003255 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003256 break;
3257 }
3258
3259 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3260}
3261
Ido Schimmel9665b742017-02-08 11:16:42 +01003262static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003263 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003264{
3265 struct mlxsw_sp_nexthop *nh, *tmp;
3266
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003267 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003268 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003269 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3270 }
3271}
3272
Petr Machata9b014512017-09-02 23:49:20 +02003273static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3274 const struct fib_info *fi)
3275{
Petr Machata1012b9a2017-09-02 23:49:23 +02003276 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3277 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003278}
3279
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003280static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003281mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003282{
3283 struct mlxsw_sp_nexthop_group *nh_grp;
3284 struct mlxsw_sp_nexthop *nh;
3285 struct fib_nh *fib_nh;
3286 size_t alloc_size;
3287 int i;
3288 int err;
3289
3290 alloc_size = sizeof(*nh_grp) +
3291 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3292 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3293 if (!nh_grp)
3294 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003295 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003296 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003297 nh_grp->neigh_tbl = &arp_tbl;
3298
Petr Machata9b014512017-09-02 23:49:20 +02003299 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003300 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003301 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003302 for (i = 0; i < nh_grp->count; i++) {
3303 nh = &nh_grp->nexthops[i];
3304 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003305 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003306 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003307 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003308 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003309 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3310 if (err)
3311 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003312 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3313 return nh_grp;
3314
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003315err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003316err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003317 for (i--; i >= 0; i--) {
3318 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003319 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003320 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003321 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003322 kfree(nh_grp);
3323 return ERR_PTR(err);
3324}
3325
3326static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003327mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3328 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003329{
3330 struct mlxsw_sp_nexthop *nh;
3331 int i;
3332
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003333 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003334 for (i = 0; i < nh_grp->count; i++) {
3335 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003336 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003337 }
Ido Schimmel58312122016-12-23 09:32:50 +01003338 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3339 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003340 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003341 kfree(nh_grp);
3342}
3343
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003344static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3345 struct mlxsw_sp_fib_entry *fib_entry,
3346 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003347{
3348 struct mlxsw_sp_nexthop_group *nh_grp;
3349
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003350 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003351 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003352 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003353 if (IS_ERR(nh_grp))
3354 return PTR_ERR(nh_grp);
3355 }
3356 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3357 fib_entry->nh_group = nh_grp;
3358 return 0;
3359}
3360
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003361static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3362 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003363{
3364 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3365
3366 list_del(&fib_entry->nexthop_group_node);
3367 if (!list_empty(&nh_grp->fib_list))
3368 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003369 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003370}
3371
Ido Schimmel013b20f2017-02-08 11:16:36 +01003372static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003373mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3374{
3375 struct mlxsw_sp_fib4_entry *fib4_entry;
3376
3377 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3378 common);
3379 return !fib4_entry->tos;
3380}
3381
3382static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003383mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3384{
3385 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3386
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003387 switch (fib_entry->fib_node->fib->proto) {
3388 case MLXSW_SP_L3_PROTO_IPV4:
3389 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3390 return false;
3391 break;
3392 case MLXSW_SP_L3_PROTO_IPV6:
3393 break;
3394 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003395
Ido Schimmel013b20f2017-02-08 11:16:36 +01003396 switch (fib_entry->type) {
3397 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3398 return !!nh_group->adj_index_valid;
3399 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003400 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003401 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3402 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003403 default:
3404 return false;
3405 }
3406}
3407
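/* Find the nexthop in the group that corresponds to the given IPv6
 * route, by matching the route's egress device and gateway address.
 */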
Ido Schimmel428b8512017-08-03 13:28:28 +02003408static struct mlxsw_sp_nexthop *
3409mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3410 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3411{
3412 int i;
3413
3414 for (i = 0; i < nh_grp->count; i++) {
3415 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3416 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3417
3418 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3419 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3420 &rt->rt6i_gateway))
3421			return nh;
3423 }
3424
3425 return NULL;
3426}
3427
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003428static void
3429mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3430{
3431 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3432 int i;
3433
Petr Machata4607f6d2017-09-02 23:49:25 +02003434 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3435 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003436 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3437 return;
3438 }
3439
3440 for (i = 0; i < nh_grp->count; i++) {
3441 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3442
3443 if (nh->offloaded)
3444 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3445 else
3446 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3447 }
3448}
3449
3450static void
3451mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3452{
3453 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3454 int i;
3455
3456 for (i = 0; i < nh_grp->count; i++) {
3457 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3458
3459 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3460 }
3461}
3462
Ido Schimmel428b8512017-08-03 13:28:28 +02003463static void
3464mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3465{
3466 struct mlxsw_sp_fib6_entry *fib6_entry;
3467 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3468
3469 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3470 common);
3471
3472 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3473 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003474 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003475 return;
3476 }
3477
3478 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3479 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3480 struct mlxsw_sp_nexthop *nh;
3481
3482 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3483 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003484 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003485 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003486 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003487 }
3488}
3489
3490static void
3491mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3492{
3493 struct mlxsw_sp_fib6_entry *fib6_entry;
3494 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3495
3496 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3497 common);
3498 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3499 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3500
Ido Schimmelfe400792017-08-15 09:09:49 +02003501 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003502 }
3503}
3504
Ido Schimmel013b20f2017-02-08 11:16:36 +01003505static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3506{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003507 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003508 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003509 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003510 break;
3511 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003512 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3513 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003514 }
3515}
3516
3517static void
3518mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3519{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003520 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003521 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003522 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003523 break;
3524 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003525 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3526 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003527 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003528}
3529
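/* Keep the offload indication reported to the kernel in sync with the
 * outcome of the last hardware operation: clear it when the entry is
 * deleted, and on a successful write set or clear it depending on
 * whether the entry could actually be offloaded.
 */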
3530static void
3531mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3532 enum mlxsw_reg_ralue_op op, int err)
3533{
3534 switch (op) {
3535 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003536 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3537 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3538 if (err)
3539 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003540 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003541 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003542 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003543 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3544 return;
3545 default:
3546 return;
3547 }
3548}
3549
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003550static void
3551mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3552 const struct mlxsw_sp_fib_entry *fib_entry,
3553 enum mlxsw_reg_ralue_op op)
3554{
3555 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3556 enum mlxsw_reg_ralxx_protocol proto;
3557 u32 *p_dip;
3558
3559 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3560
3561 switch (fib->proto) {
3562 case MLXSW_SP_L3_PROTO_IPV4:
3563 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3564 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3565 fib_entry->fib_node->key.prefix_len,
3566 *p_dip);
3567 break;
3568 case MLXSW_SP_L3_PROTO_IPV6:
3569 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3570 fib_entry->fib_node->key.prefix_len,
3571 fib_entry->fib_node->key.addr);
3572 break;
3573 }
3574}
3575
3576static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3577 struct mlxsw_sp_fib_entry *fib_entry,
3578 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003579{
3580 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003581 enum mlxsw_reg_ralue_trap_action trap_action;
3582 u16 trap_id = 0;
3583 u32 adjacency_index = 0;
3584 u16 ecmp_size = 0;
3585
3586	/* In case the nexthop group adjacency index is valid, use it
3587	 * with the provided ECMP size. Otherwise, set up a trap and pass
3588	 * traffic to the kernel.
3589 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003590 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003591 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3592 adjacency_index = fib_entry->nh_group->adj_index;
3593 ecmp_size = fib_entry->nh_group->ecmp_size;
3594 } else {
3595 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3596 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3597 }
3598
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003599 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003600 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3601 adjacency_index, ecmp_size);
3602 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3603}
3604
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003605static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3606 struct mlxsw_sp_fib_entry *fib_entry,
3607 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003608{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003609 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003610 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003611 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003612 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003613 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003614
3615 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3616 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003617 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003618 } else {
3619 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3620 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3621 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003622
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003623 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003624 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3625 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003626 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3627}
3628
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003629static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3630 struct mlxsw_sp_fib_entry *fib_entry,
3631 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003632{
3633 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003634
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003635 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003636 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3637 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3638}
3639
Petr Machata4607f6d2017-09-02 23:49:25 +02003640static int
3641mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3642 struct mlxsw_sp_fib_entry *fib_entry,
3643 enum mlxsw_reg_ralue_op op)
3644{
3645 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3646 const struct mlxsw_sp_ipip_ops *ipip_ops;
3647
3648 if (WARN_ON(!ipip_entry))
3649 return -EINVAL;
3650
3651 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3652 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3653 fib_entry->decap.tunnel_index);
3654}
3655
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003656static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3657 struct mlxsw_sp_fib_entry *fib_entry,
3658 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003659{
3660 switch (fib_entry->type) {
3661 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003662 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003663 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003664 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003665 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003666 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003667 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3668 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3669 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003670 }
3671 return -EINVAL;
3672}
3673
3674static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3675 struct mlxsw_sp_fib_entry *fib_entry,
3676 enum mlxsw_reg_ralue_op op)
3677{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003678 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003679
Ido Schimmel013b20f2017-02-08 11:16:36 +01003680 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003681
Ido Schimmel013b20f2017-02-08 11:16:36 +01003682 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003683}
3684
3685static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3686 struct mlxsw_sp_fib_entry *fib_entry)
3687{
Jiri Pirko7146da32016-09-01 10:37:41 +02003688 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3689 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003690}
3691
3692static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3693 struct mlxsw_sp_fib_entry *fib_entry)
3694{
3695 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3696 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3697}
3698
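/* Derive the hardware FIB entry type from the kernel route type: local
 * routes that match an IP-in-IP tunnel decap address become decap
 * entries, other local and broadcast routes are trapped to the CPU,
 * unreachable/blackhole/prohibit routes are trapped at a lower priority
 * (type local), and unicast routes become remote (adjacency) or local
 * entries depending on whether they are gateway routes.
 */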
Jiri Pirko61c503f2016-07-04 08:23:11 +02003699static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003700mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3701 const struct fib_entry_notifier_info *fen_info,
3702 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003703{
Petr Machata4607f6d2017-09-02 23:49:25 +02003704 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3705 struct net_device *dev = fen_info->fi->fib_dev;
3706 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003707 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003708
Ido Schimmel97989ee2017-03-10 08:53:38 +01003709 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003710 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003711 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3712 MLXSW_SP_L3_PROTO_IPV4, dip);
3713 if (ipip_entry) {
3714 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3715 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3716 fib_entry,
3717 ipip_entry);
3718 }
3719 /* fall through */
3720 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003721 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3722 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003723 case RTN_UNREACHABLE: /* fall through */
3724 case RTN_BLACKHOLE: /* fall through */
3725 case RTN_PROHIBIT:
3726		/* Packets hitting these routes need to be trapped, but
3727		 * can be trapped at a lower priority than packets directed
3728		 * at the host, so use action type local instead of trap.
3729 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003730 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003731 return 0;
3732 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003733 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003734 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003735 else
3736 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003737 return 0;
3738 default:
3739 return -EINVAL;
3740 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003741}
3742
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003743static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003744mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3745 struct mlxsw_sp_fib_node *fib_node,
3746 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003747{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003748 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003749 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003750 int err;
3751
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003752 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3753 if (!fib4_entry)
3754 return ERR_PTR(-ENOMEM);
3755 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003756
3757 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3758 if (err)
3759 goto err_fib4_entry_type_set;
3760
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003761 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003762 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003763 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003764
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003765 fib4_entry->prio = fen_info->fi->fib_priority;
3766 fib4_entry->tb_id = fen_info->tb_id;
3767 fib4_entry->type = fen_info->type;
3768 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003769
3770 fib_entry->fib_node = fib_node;
3771
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003772 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003773
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003774err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003775err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003776 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003777 return ERR_PTR(err);
3778}
3779
3780static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003781 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003782{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003783 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003784 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003785}
3786
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003787static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003788mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3789 const struct fib_entry_notifier_info *fen_info)
3790{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003791 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003792 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003793 struct mlxsw_sp_fib *fib;
3794 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003795
Ido Schimmel160e22a2017-07-18 10:10:20 +02003796 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3797 if (!vr)
3798 return NULL;
3799 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3800
3801 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3802 sizeof(fen_info->dst),
3803 fen_info->dst_len);
3804 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003805 return NULL;
3806
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003807 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3808 if (fib4_entry->tb_id == fen_info->tb_id &&
3809 fib4_entry->tos == fen_info->tos &&
3810 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003811 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3812 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003813 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003814 }
3815 }
3816
3817 return NULL;
3818}
3819
3820static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3821 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3822 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3823 .key_len = sizeof(struct mlxsw_sp_fib_key),
3824 .automatic_shrinking = true,
3825};
3826
3827static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3828 struct mlxsw_sp_fib_node *fib_node)
3829{
3830 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3831 mlxsw_sp_fib_ht_params);
3832}
3833
3834static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3835 struct mlxsw_sp_fib_node *fib_node)
3836{
3837 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3838 mlxsw_sp_fib_ht_params);
3839}
3840
3841static struct mlxsw_sp_fib_node *
3842mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3843 size_t addr_len, unsigned char prefix_len)
3844{
3845 struct mlxsw_sp_fib_key key;
3846
3847 memset(&key, 0, sizeof(key));
3848 memcpy(key.addr, addr, addr_len);
3849 key.prefix_len = prefix_len;
3850 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3851}
3852
3853static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003854mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003855 size_t addr_len, unsigned char prefix_len)
3856{
3857 struct mlxsw_sp_fib_node *fib_node;
3858
3859 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3860 if (!fib_node)
3861 return NULL;
3862
3863 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003864 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003865 memcpy(fib_node->key.addr, addr, addr_len);
3866 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003867
3868 return fib_node;
3869}
3870
3871static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3872{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003873 list_del(&fib_node->list);
3874 WARN_ON(!list_empty(&fib_node->entry_list));
3875 kfree(fib_node);
3876}
3877
3878static bool
3879mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3880 const struct mlxsw_sp_fib_entry *fib_entry)
3881{
3882 return list_first_entry(&fib_node->entry_list,
3883 struct mlxsw_sp_fib_entry, list) == fib_entry;
3884}
3885
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003886static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3887 struct mlxsw_sp_fib *fib,
3888 struct mlxsw_sp_fib_node *fib_node)
3889{
3890 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3891 struct mlxsw_sp_lpm_tree *lpm_tree;
3892 int err;
3893
3894	/* Since the tree is shared between all virtual routers, we must
3895 * make sure it contains all the required prefix lengths. This
3896 * can be computed by either adding the new prefix length to the
3897 * existing prefix usage of a bound tree, or by aggregating the
3898 * prefix lengths across all virtual routers and adding the new
3899 * one as well.
3900 */
3901 if (fib->lpm_tree)
3902 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3903 &fib->lpm_tree->prefix_usage);
3904 else
3905 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3906 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3907
3908 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3909 fib->proto);
3910 if (IS_ERR(lpm_tree))
3911 return PTR_ERR(lpm_tree);
3912
3913 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3914 return 0;
3915
3916 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3917 if (err)
3918 return err;
3919
3920 return 0;
3921}
3922
3923static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3924 struct mlxsw_sp_fib *fib)
3925{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003926 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3927 return;
3928 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3929 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3930 fib->lpm_tree = NULL;
3931}
3932
Ido Schimmel9aecce12017-02-09 10:28:42 +01003933static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3934{
3935 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003936 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003937
3938 if (fib->prefix_ref_count[prefix_len]++ == 0)
3939 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3940}
3941
3942static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3943{
3944 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003945 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003946
3947 if (--fib->prefix_ref_count[prefix_len] == 0)
3948 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3949}
3950
Ido Schimmel76610eb2017-03-10 08:53:41 +01003951static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3952 struct mlxsw_sp_fib_node *fib_node,
3953 struct mlxsw_sp_fib *fib)
3954{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003955 int err;
3956
3957 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3958 if (err)
3959 return err;
3960 fib_node->fib = fib;
3961
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003962 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3963 if (err)
3964 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003965
3966 mlxsw_sp_fib_node_prefix_inc(fib_node);
3967
3968 return 0;
3969
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003970err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01003971 fib_node->fib = NULL;
3972 mlxsw_sp_fib_node_remove(fib, fib_node);
3973 return err;
3974}
3975
3976static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
3977 struct mlxsw_sp_fib_node *fib_node)
3978{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003979 struct mlxsw_sp_fib *fib = fib_node->fib;
3980
3981 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003982 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003983 fib_node->fib = NULL;
3984 mlxsw_sp_fib_node_remove(fib, fib_node);
3985}
3986
Ido Schimmel9aecce12017-02-09 10:28:42 +01003987static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003988mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
3989 size_t addr_len, unsigned char prefix_len,
3990 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003991{
3992 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003993 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02003994 struct mlxsw_sp_vr *vr;
3995 int err;
3996
David Ahernf8fa9b42017-10-18 09:56:56 -07003997 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02003998 if (IS_ERR(vr))
3999 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004000 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004001
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004002 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004003 if (fib_node)
4004 return fib_node;
4005
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004006 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004007 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004008 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004009 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004010 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004011
Ido Schimmel76610eb2017-03-10 08:53:41 +01004012 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4013 if (err)
4014 goto err_fib_node_init;
4015
Ido Schimmel9aecce12017-02-09 10:28:42 +01004016 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004017
Ido Schimmel76610eb2017-03-10 08:53:41 +01004018err_fib_node_init:
4019 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004020err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004021 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004022 return ERR_PTR(err);
4023}
4024
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004025static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4026 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004027{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004028 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004029
Ido Schimmel9aecce12017-02-09 10:28:42 +01004030 if (!list_empty(&fib_node->entry_list))
4031 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004032 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004033 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004034 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004035}
4036
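/* FIB entries hanging off a fib_node are kept sorted: by table ID and
 * TOS in descending order, and by route priority (metric) in ascending
 * order within the same table and TOS. Only the first entry in the
 * list is actually programmed to the device; the following entries are
 * promoted when it is removed.
 */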
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004037static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004038mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004039 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004040{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004041 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004042
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004043 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4044 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004045 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004046 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004047 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004048 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004049 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004050 if (fib4_entry->prio >= new4_entry->prio ||
4051 fib4_entry->tos < new4_entry->tos)
4052 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004053 }
4054
4055 return NULL;
4056}
4057
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004058static int
4059mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4060 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004061{
4062 struct mlxsw_sp_fib_node *fib_node;
4063
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004064 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004065 return -EINVAL;
4066
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004067 fib_node = fib4_entry->common.fib_node;
4068 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4069 common.list) {
4070 if (fib4_entry->tb_id != new4_entry->tb_id ||
4071 fib4_entry->tos != new4_entry->tos ||
4072 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004073 break;
4074 }
4075
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004076 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004077 return 0;
4078}
4079
Ido Schimmel9aecce12017-02-09 10:28:42 +01004080static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004081mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004082 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004083{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004084 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004085 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004086
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004087 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004088
Ido Schimmel4283bce2017-02-09 10:28:43 +01004089 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004090 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4091 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004092 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004093
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004094	/* Insert the new entry before the replaced one, so that we can
4095	 * later remove the latter.
4096 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004097 if (fib4_entry) {
4098 list_add_tail(&new4_entry->common.list,
4099 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004100 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004101 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004102
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004103 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4104 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004105 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004106 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004107 }
4108
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004109 if (fib4_entry)
4110 list_add(&new4_entry->common.list,
4111 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004112 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004113 list_add(&new4_entry->common.list,
4114 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004115 }
4116
4117 return 0;
4118}
4119
4120static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004121mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004122{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004123 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004124}
4125
Ido Schimmel80c238f2017-07-18 10:10:29 +02004126static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4127 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004128{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004129 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4130
Ido Schimmel9aecce12017-02-09 10:28:42 +01004131 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4132 return 0;
4133
4134 /* To prevent packet loss, overwrite the previously offloaded
4135 * entry.
4136 */
4137 if (!list_is_singular(&fib_node->entry_list)) {
4138 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4139 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4140
4141 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4142 }
4143
4144 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4145}
4146
Ido Schimmel80c238f2017-07-18 10:10:29 +02004147static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4148 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004149{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004150 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4151
Ido Schimmel9aecce12017-02-09 10:28:42 +01004152 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4153 return;
4154
4155 /* Promote the next entry by overwriting the deleted entry */
4156 if (!list_is_singular(&fib_node->entry_list)) {
4157 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4158 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4159
4160 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4161 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4162 return;
4163 }
4164
4165 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4166}
4167
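/* Insert the entry into its FIB node's entry list and, if it became
 * the first entry, reflect it in the device.
 */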
4168static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004169 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004170 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004171{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004172 int err;
4173
Ido Schimmel9efbee62017-07-18 10:10:28 +02004174 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004175 if (err)
4176 return err;
4177
Ido Schimmel80c238f2017-07-18 10:10:29 +02004178 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004179 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004180 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004181
Ido Schimmel9aecce12017-02-09 10:28:42 +01004182 return 0;
4183
Ido Schimmel80c238f2017-07-18 10:10:29 +02004184err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004185 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004186 return err;
4187}
4188
4189static void
4190mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004191 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004192{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004193 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004194 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004195
4196 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4197 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004198}
4199
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004200static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004201 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004202 bool replace)
4203{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004204 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4205 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004206
4207 if (!replace)
4208 return;
4209
 4210	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004211 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004212
4213 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4214 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004215 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004216}
4217
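/* Handle addition, replacement or appending of an IPv4 route. The
 * route is attached to a FIB node keyed by {table, prefix, prefix
 * length} and linked into the node's ordered entry list.
 */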
Ido Schimmel9aecce12017-02-09 10:28:42 +01004218static int
4219mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004220 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004221 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004222{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004223 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004224 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004225 int err;
4226
Ido Schimmel9011b672017-05-16 19:38:25 +02004227 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004228 return 0;
4229
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004230 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4231 &fen_info->dst, sizeof(fen_info->dst),
4232 fen_info->dst_len,
4233 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004234 if (IS_ERR(fib_node)) {
4235 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4236 return PTR_ERR(fib_node);
4237 }
4238
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004239 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4240 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004241 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004242 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004243 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004244 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004245
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004246 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004247 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004248 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004249 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4250 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004251 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004252
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004253 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004254
Jiri Pirko61c503f2016-07-04 08:23:11 +02004255 return 0;
4256
Ido Schimmel9aecce12017-02-09 10:28:42 +01004257err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004258 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004259err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004260 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004261 return err;
4262}
4263
Jiri Pirko37956d72016-10-20 16:05:43 +02004264static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4265 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004266{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004267 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004268 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004269
Ido Schimmel9011b672017-05-16 19:38:25 +02004270 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004271 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004272
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004273 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4274 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004275 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004276 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004277
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004278 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4279 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004280 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004281}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004282
Ido Schimmel428b8512017-08-03 13:28:28 +02004283static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4284{
 4285	/* Packets with a link-local destination IP arriving at the router
 4286	 * are trapped to the CPU, so there is no need to program specific
 4287	 * routes for them.
4288 */
4289 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4290 return true;
4291
4292 /* Multicast routes aren't supported, so ignore them. Neighbour
4293 * Discovery packets are specifically trapped.
4294 */
4295 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4296 return true;
4297
4298 /* Cloned routes are irrelevant in the forwarding path. */
4299 if (rt->rt6i_flags & RTF_CACHE)
4300 return true;
4301
4302 return false;
4303}
4304
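/* Wrap an rt6_info in a driver-private structure so that it can be
 * linked into a FIB6 entry's list of routes.
 */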
4305static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4306{
4307 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4308
4309 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4310 if (!mlxsw_sp_rt6)
4311 return ERR_PTR(-ENOMEM);
4312
 4313	/* In case of route replace, the replaced route is deleted
 4314	 * without notification. Take a reference to prevent accessing
 4315	 * freed memory.
4316 */
4317 mlxsw_sp_rt6->rt = rt;
4318 rt6_hold(rt);
4319
4320 return mlxsw_sp_rt6;
4321}
4322
4323#if IS_ENABLED(CONFIG_IPV6)
4324static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4325{
4326 rt6_release(rt);
4327}
4328#else
4329static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4330{
4331}
4332#endif
4333
4334static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4335{
4336 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4337 kfree(mlxsw_sp_rt6);
4338}
4339
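/* A route can be appended to a multipath entry only if it is a gateway
 * route that was not created by address auto-configuration.
 */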
4340static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4341{
4342 /* RTF_CACHE routes are ignored */
4343 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4344}
4345
4346static struct rt6_info *
4347mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4348{
4349 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4350 list)->rt;
4351}
4352
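/* Find an existing multipath-capable entry with the same table ID and
 * metric as the new route, so that the route can be appended to it as
 * an additional nexthop. Not applicable for replace events.
 */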
4353static struct mlxsw_sp_fib6_entry *
4354mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004355 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004356{
4357 struct mlxsw_sp_fib6_entry *fib6_entry;
4358
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004359 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004360 return NULL;
4361
4362 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4363 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4364
4365 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4366 * virtual router.
4367 */
4368 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4369 continue;
4370 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4371 break;
4372 if (rt->rt6i_metric < nrt->rt6i_metric)
4373 continue;
4374 if (rt->rt6i_metric == nrt->rt6i_metric &&
4375 mlxsw_sp_fib6_rt_can_mp(rt))
4376 return fib6_entry;
4377 if (rt->rt6i_metric > nrt->rt6i_metric)
4378 break;
4379 }
4380
4381 return NULL;
4382}
4383
4384static struct mlxsw_sp_rt6 *
4385mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4386 const struct rt6_info *rt)
4387{
4388 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4389
4390 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4391 if (mlxsw_sp_rt6->rt == rt)
4392 return mlxsw_sp_rt6;
4393 }
4394
4395 return NULL;
4396}
4397
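/* Check whether the route's nexthop device is an IP-in-IP tunnel
 * recognized by the driver and, if so, report its tunnel type.
 */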
Petr Machata8f28a302017-09-02 23:49:24 +02004398static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4399 const struct rt6_info *rt,
4400 enum mlxsw_sp_ipip_type *ret)
4401{
4402 return rt->dst.dev &&
4403 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4404}
4405
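/* Initialize the nexthop according to its type: IP-in-IP nexthops are
 * bound to the tunnel's loopback RIF, while Ethernet nexthops are bound
 * to the RIF of their device and resolved via neighbour discovery.
 */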
Petr Machata35225e42017-09-02 23:49:22 +02004406static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4407 struct mlxsw_sp_nexthop_group *nh_grp,
4408 struct mlxsw_sp_nexthop *nh,
4409 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004410{
Petr Machata8f28a302017-09-02 23:49:24 +02004411 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004412 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004413 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004414 struct mlxsw_sp_rif *rif;
4415 int err;
4416
Petr Machata8f28a302017-09-02 23:49:24 +02004417 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4418 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4419 MLXSW_SP_L3_PROTO_IPV6)) {
4420 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004421 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004422 if (err)
4423 return err;
4424 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4425 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004426 }
4427
Petr Machata35225e42017-09-02 23:49:22 +02004428 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004429 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4430 if (!rif)
4431 return 0;
4432 mlxsw_sp_nexthop_rif_init(nh, rif);
4433
4434 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4435 if (err)
4436 goto err_nexthop_neigh_init;
4437
4438 return 0;
4439
4440err_nexthop_neigh_init:
4441 mlxsw_sp_nexthop_rif_fini(nh);
4442 return err;
4443}
4444
Petr Machata35225e42017-09-02 23:49:22 +02004445static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4446 struct mlxsw_sp_nexthop *nh)
4447{
4448 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4449}
4450
4451static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4452 struct mlxsw_sp_nexthop_group *nh_grp,
4453 struct mlxsw_sp_nexthop *nh,
4454 const struct rt6_info *rt)
4455{
4456 struct net_device *dev = rt->dst.dev;
4457
4458 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004459 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004460 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004461 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004462
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004463 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4464
Petr Machata35225e42017-09-02 23:49:22 +02004465 if (!dev)
4466 return 0;
4467 nh->ifindex = dev->ifindex;
4468
4469 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4470}
4471
Ido Schimmel428b8512017-08-03 13:28:28 +02004472static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4473 struct mlxsw_sp_nexthop *nh)
4474{
Petr Machata35225e42017-09-02 23:49:22 +02004475 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004476 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004477 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004478}
4479
Petr Machataf6050ee2017-09-02 23:49:21 +02004480static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4481 const struct rt6_info *rt)
4482{
Petr Machata8f28a302017-09-02 23:49:24 +02004483 return rt->rt6i_flags & RTF_GATEWAY ||
4484 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004485}
4486
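/* Create a nexthop group with one nexthop per route of the FIB6 entry
 * and insert it into the nexthop group hash table, so that it can be
 * shared with other entries using the same set of nexthops.
 */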
Ido Schimmel428b8512017-08-03 13:28:28 +02004487static struct mlxsw_sp_nexthop_group *
4488mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4489 struct mlxsw_sp_fib6_entry *fib6_entry)
4490{
4491 struct mlxsw_sp_nexthop_group *nh_grp;
4492 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4493 struct mlxsw_sp_nexthop *nh;
4494 size_t alloc_size;
4495 int i = 0;
4496 int err;
4497
4498 alloc_size = sizeof(*nh_grp) +
4499 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4500 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4501 if (!nh_grp)
4502 return ERR_PTR(-ENOMEM);
4503 INIT_LIST_HEAD(&nh_grp->fib_list);
4504#if IS_ENABLED(CONFIG_IPV6)
4505 nh_grp->neigh_tbl = &nd_tbl;
4506#endif
4507 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4508 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004509 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004510 nh_grp->count = fib6_entry->nrt6;
4511 for (i = 0; i < nh_grp->count; i++) {
4512 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4513
4514 nh = &nh_grp->nexthops[i];
4515 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4516 if (err)
4517 goto err_nexthop6_init;
4518 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4519 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004520
4521 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4522 if (err)
4523 goto err_nexthop_group_insert;
4524
Ido Schimmel428b8512017-08-03 13:28:28 +02004525 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4526 return nh_grp;
4527
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004528err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004529err_nexthop6_init:
4530 for (i--; i >= 0; i--) {
4531 nh = &nh_grp->nexthops[i];
4532 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4533 }
4534 kfree(nh_grp);
4535 return ERR_PTR(err);
4536}
4537
4538static void
4539mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4540 struct mlxsw_sp_nexthop_group *nh_grp)
4541{
4542 struct mlxsw_sp_nexthop *nh;
4543 int i = nh_grp->count;
4544
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004545 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004546 for (i--; i >= 0; i--) {
4547 nh = &nh_grp->nexthops[i];
4548 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4549 }
4550 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4551 WARN_ON(nh_grp->adj_index_valid);
4552 kfree(nh_grp);
4553}
4554
4555static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4556 struct mlxsw_sp_fib6_entry *fib6_entry)
4557{
4558 struct mlxsw_sp_nexthop_group *nh_grp;
4559
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004560 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4561 if (!nh_grp) {
4562 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4563 if (IS_ERR(nh_grp))
4564 return PTR_ERR(nh_grp);
4565 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004566
4567 list_add_tail(&fib6_entry->common.nexthop_group_node,
4568 &nh_grp->fib_list);
4569 fib6_entry->common.nh_group = nh_grp;
4570
4571 return 0;
4572}
4573
4574static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4575 struct mlxsw_sp_fib_entry *fib_entry)
4576{
4577 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4578
4579 list_del(&fib_entry->nexthop_group_node);
4580 if (!list_empty(&nh_grp->fib_list))
4581 return;
4582 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4583}
4584
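/* Rebind the FIB6 entry to a nexthop group matching its current set of
 * routes. Called after a nexthop was added to or removed from the
 * entry.
 */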
4585static int
4586mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4587 struct mlxsw_sp_fib6_entry *fib6_entry)
4588{
4589 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4590 int err;
4591
4592 fib6_entry->common.nh_group = NULL;
4593 list_del(&fib6_entry->common.nexthop_group_node);
4594
4595 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4596 if (err)
4597 goto err_nexthop6_group_get;
4598
 4599	/* If this entry is offloaded, then the adjacency index currently
 4600	 * associated with it in the device's table is that of the old
 4601	 * group. Start using the new one instead.
4602 */
4603 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4604 if (err)
4605 goto err_fib_node_entry_add;
4606
4607 if (list_empty(&old_nh_grp->fib_list))
4608 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4609
4610 return 0;
4611
4612err_fib_node_entry_add:
4613 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4614err_nexthop6_group_get:
4615 list_add_tail(&fib6_entry->common.nexthop_group_node,
4616 &old_nh_grp->fib_list);
4617 fib6_entry->common.nh_group = old_nh_grp;
4618 return err;
4619}
4620
4621static int
4622mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4623 struct mlxsw_sp_fib6_entry *fib6_entry,
4624 struct rt6_info *rt)
4625{
4626 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4627 int err;
4628
4629 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4630 if (IS_ERR(mlxsw_sp_rt6))
4631 return PTR_ERR(mlxsw_sp_rt6);
4632
4633 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4634 fib6_entry->nrt6++;
4635
4636 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4637 if (err)
4638 goto err_nexthop6_group_update;
4639
4640 return 0;
4641
4642err_nexthop6_group_update:
4643 fib6_entry->nrt6--;
4644 list_del(&mlxsw_sp_rt6->list);
4645 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4646 return err;
4647}
4648
4649static void
4650mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4651 struct mlxsw_sp_fib6_entry *fib6_entry,
4652 struct rt6_info *rt)
4653{
4654 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4655
4656 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4657 if (WARN_ON(!mlxsw_sp_rt6))
4658 return;
4659
4660 fib6_entry->nrt6--;
4661 list_del(&mlxsw_sp_rt6->list);
4662 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4663 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4664}
4665
Petr Machataf6050ee2017-09-02 23:49:21 +02004666static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4667 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004668 const struct rt6_info *rt)
4669{
4670 /* Packets hitting RTF_REJECT routes need to be discarded by the
4671 * stack. We can rely on their destination device not having a
4672 * RIF (it's the loopback device) and can thus use action type
4673 * local, which will cause them to be trapped with a lower
4674 * priority than packets that need to be locally received.
4675 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004676 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004677 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4678 else if (rt->rt6i_flags & RTF_REJECT)
4679 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004680 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004681 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4682 else
4683 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4684}
4685
4686static void
4687mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4688{
4689 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4690
4691 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4692 list) {
4693 fib6_entry->nrt6--;
4694 list_del(&mlxsw_sp_rt6->list);
4695 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4696 }
4697}
4698
4699static struct mlxsw_sp_fib6_entry *
4700mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4701 struct mlxsw_sp_fib_node *fib_node,
4702 struct rt6_info *rt)
4703{
4704 struct mlxsw_sp_fib6_entry *fib6_entry;
4705 struct mlxsw_sp_fib_entry *fib_entry;
4706 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4707 int err;
4708
4709 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4710 if (!fib6_entry)
4711 return ERR_PTR(-ENOMEM);
4712 fib_entry = &fib6_entry->common;
4713
4714 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4715 if (IS_ERR(mlxsw_sp_rt6)) {
4716 err = PTR_ERR(mlxsw_sp_rt6);
4717 goto err_rt6_create;
4718 }
4719
Petr Machataf6050ee2017-09-02 23:49:21 +02004720 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004721
4722 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4723 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4724 fib6_entry->nrt6 = 1;
4725 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4726 if (err)
4727 goto err_nexthop6_group_get;
4728
4729 fib_entry->fib_node = fib_node;
4730
4731 return fib6_entry;
4732
4733err_nexthop6_group_get:
4734 list_del(&mlxsw_sp_rt6->list);
4735 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4736err_rt6_create:
4737 kfree(fib6_entry);
4738 return ERR_PTR(err);
4739}
4740
4741static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4742 struct mlxsw_sp_fib6_entry *fib6_entry)
4743{
4744 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4745 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4746 WARN_ON(fib6_entry->nrt6);
4747 kfree(fib6_entry);
4748}
4749
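/* Find the entry before which the new route should be inserted, so
 * that the node's entry list stays sorted by table ID and metric. In
 * the replace case, the returned entry is the one being replaced.
 */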
4750static struct mlxsw_sp_fib6_entry *
4751mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004752 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004753{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004754 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004755
4756 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4757 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4758
4759 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4760 continue;
4761 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4762 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004763 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4764 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4765 mlxsw_sp_fib6_rt_can_mp(nrt))
4766 return fib6_entry;
4767 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4768 fallback = fallback ?: fib6_entry;
4769 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004770 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004771 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004772 }
4773
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004774 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004775}
4776
4777static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004778mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4779 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004780{
4781 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4782 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4783 struct mlxsw_sp_fib6_entry *fib6_entry;
4784
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004785 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4786
4787 if (replace && WARN_ON(!fib6_entry))
4788 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004789
4790 if (fib6_entry) {
4791 list_add_tail(&new6_entry->common.list,
4792 &fib6_entry->common.list);
4793 } else {
4794 struct mlxsw_sp_fib6_entry *last;
4795
4796 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4797 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4798
4799 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4800 break;
4801 fib6_entry = last;
4802 }
4803
4804 if (fib6_entry)
4805 list_add(&new6_entry->common.list,
4806 &fib6_entry->common.list);
4807 else
4808 list_add(&new6_entry->common.list,
4809 &fib_node->entry_list);
4810 }
4811
4812 return 0;
4813}
4814
4815static void
4816mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4817{
4818 list_del(&fib6_entry->common.list);
4819}
4820
4821static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004822 struct mlxsw_sp_fib6_entry *fib6_entry,
4823 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004824{
4825 int err;
4826
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004827 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004828 if (err)
4829 return err;
4830
4831 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4832 if (err)
4833 goto err_fib_node_entry_add;
4834
4835 return 0;
4836
4837err_fib_node_entry_add:
4838 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4839 return err;
4840}
4841
4842static void
4843mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4844 struct mlxsw_sp_fib6_entry *fib6_entry)
4845{
4846 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4847 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4848}
4849
4850static struct mlxsw_sp_fib6_entry *
4851mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4852 const struct rt6_info *rt)
4853{
4854 struct mlxsw_sp_fib6_entry *fib6_entry;
4855 struct mlxsw_sp_fib_node *fib_node;
4856 struct mlxsw_sp_fib *fib;
4857 struct mlxsw_sp_vr *vr;
4858
4859 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4860 if (!vr)
4861 return NULL;
4862 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4863
4864 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4865 sizeof(rt->rt6i_dst.addr),
4866 rt->rt6i_dst.plen);
4867 if (!fib_node)
4868 return NULL;
4869
4870 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4871 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4872
4873 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4874 rt->rt6i_metric == iter_rt->rt6i_metric &&
4875 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4876 return fib6_entry;
4877 }
4878
4879 return NULL;
4880}
4881
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004882static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4883 struct mlxsw_sp_fib6_entry *fib6_entry,
4884 bool replace)
4885{
4886 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4887 struct mlxsw_sp_fib6_entry *replaced;
4888
4889 if (!replace)
4890 return;
4891
4892 replaced = list_next_entry(fib6_entry, common.list);
4893
4894 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4895 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4896 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4897}
4898
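/* Add or replace an IPv6 route. If a matching multipath-capable entry
 * already exists, the route is appended to it as an additional nexthop;
 * otherwise, a new FIB entry is created.
 */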
Ido Schimmel428b8512017-08-03 13:28:28 +02004899static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004900 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004901{
4902 struct mlxsw_sp_fib6_entry *fib6_entry;
4903 struct mlxsw_sp_fib_node *fib_node;
4904 int err;
4905
4906 if (mlxsw_sp->router->aborted)
4907 return 0;
4908
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004909 if (rt->rt6i_src.plen)
4910 return -EINVAL;
4911
Ido Schimmel428b8512017-08-03 13:28:28 +02004912 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4913 return 0;
4914
4915 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4916 &rt->rt6i_dst.addr,
4917 sizeof(rt->rt6i_dst.addr),
4918 rt->rt6i_dst.plen,
4919 MLXSW_SP_L3_PROTO_IPV6);
4920 if (IS_ERR(fib_node))
4921 return PTR_ERR(fib_node);
4922
4923 /* Before creating a new entry, try to append route to an existing
4924 * multipath entry.
4925 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004926 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004927 if (fib6_entry) {
4928 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4929 if (err)
4930 goto err_fib6_entry_nexthop_add;
4931 return 0;
4932 }
4933
4934 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4935 if (IS_ERR(fib6_entry)) {
4936 err = PTR_ERR(fib6_entry);
4937 goto err_fib6_entry_create;
4938 }
4939
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004940 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004941 if (err)
4942 goto err_fib6_node_entry_link;
4943
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004944 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4945
Ido Schimmel428b8512017-08-03 13:28:28 +02004946 return 0;
4947
4948err_fib6_node_entry_link:
4949 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4950err_fib6_entry_create:
4951err_fib6_entry_nexthop_add:
4952 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4953 return err;
4954}
4955
4956static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4957 struct rt6_info *rt)
4958{
4959 struct mlxsw_sp_fib6_entry *fib6_entry;
4960 struct mlxsw_sp_fib_node *fib_node;
4961
4962 if (mlxsw_sp->router->aborted)
4963 return;
4964
4965 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4966 return;
4967
4968 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4969 if (WARN_ON(!fib6_entry))
4970 return;
4971
4972 /* If route is part of a multipath entry, but not the last one
4973 * removed, then only reduce its nexthop group.
4974 */
4975 if (!list_is_singular(&fib6_entry->rt6_list)) {
4976 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
4977 return;
4978 }
4979
4980 fib_node = fib6_entry->common.fib_node;
4981
4982 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
4983 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4984 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4985}
4986
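/* Bind all virtual routers of the given protocol to a minimal LPM tree
 * holding a single default route whose action is to send packets to
 * the CPU. Used after FIB offload was aborted.
 */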
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004987static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
4988 enum mlxsw_reg_ralxx_protocol proto,
4989 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004990{
4991 char ralta_pl[MLXSW_REG_RALTA_LEN];
4992 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01004993 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004994
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004995 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004996 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
4997 if (err)
4998 return err;
4999
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005000 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005001 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5002 if (err)
5003 return err;
5004
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005005 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005006 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005007 char raltb_pl[MLXSW_REG_RALTB_LEN];
5008 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005009
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005010 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005011 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5012 raltb_pl);
5013 if (err)
5014 return err;
5015
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005016 mlxsw_reg_ralue_pack(ralue_pl, proto,
5017 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005018 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5019 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5020 ralue_pl);
5021 if (err)
5022 return err;
5023 }
5024
5025 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005026}
5027
Yotam Gigid42b0962017-09-27 08:23:20 +02005028static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5029 struct mfc_entry_notifier_info *men_info,
5030 bool replace)
5031{
5032 struct mlxsw_sp_vr *vr;
5033
5034 if (mlxsw_sp->router->aborted)
5035 return 0;
5036
David Ahernf8fa9b42017-10-18 09:56:56 -07005037 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005038 if (IS_ERR(vr))
5039 return PTR_ERR(vr);
5040
5041 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5042}
5043
5044static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5045 struct mfc_entry_notifier_info *men_info)
5046{
5047 struct mlxsw_sp_vr *vr;
5048
5049 if (mlxsw_sp->router->aborted)
5050 return;
5051
5052 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5053 if (WARN_ON(!vr))
5054 return;
5055
5056 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5057 mlxsw_sp_vr_put(vr);
5058}
5059
5060static int
5061mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5062 struct vif_entry_notifier_info *ven_info)
5063{
5064 struct mlxsw_sp_rif *rif;
5065 struct mlxsw_sp_vr *vr;
5066
5067 if (mlxsw_sp->router->aborted)
5068 return 0;
5069
David Ahernf8fa9b42017-10-18 09:56:56 -07005070 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005071 if (IS_ERR(vr))
5072 return PTR_ERR(vr);
5073
5074 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5075 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5076 ven_info->vif_index,
5077 ven_info->vif_flags, rif);
5078}
5079
5080static void
5081mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5082 struct vif_entry_notifier_info *ven_info)
5083{
5084 struct mlxsw_sp_vr *vr;
5085
5086 if (mlxsw_sp->router->aborted)
5087 return;
5088
5089 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5090 if (WARN_ON(!vr))
5091 return;
5092
5093 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5094 mlxsw_sp_vr_put(vr);
5095}
5096
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005097static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5098{
5099 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5100 int err;
5101
5102 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5103 MLXSW_SP_LPM_TREE_MIN);
5104 if (err)
5105 return err;
5106
Yotam Gigid42b0962017-09-27 08:23:20 +02005107	/* The multicast router code does not need an abort trap, since by
 5108	 * default packets that do not match any route are trapped to the CPU.
5109 */
5110
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005111 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5112 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5113 MLXSW_SP_LPM_TREE_MIN + 1);
5114}
5115
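/* Unlink and destroy all IPv4 entries of a FIB node. Used when FIB
 * offload is aborted or the router is torn down.
 */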
Ido Schimmel9aecce12017-02-09 10:28:42 +01005116static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5117 struct mlxsw_sp_fib_node *fib_node)
5118{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005119 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005120
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005121 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5122 common.list) {
5123 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005124
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005125 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5126 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005127 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005128 /* Break when entry list is empty and node was freed.
5129 * Otherwise, we'll access freed memory in the next
5130 * iteration.
5131 */
5132 if (do_break)
5133 break;
5134 }
5135}
5136
Ido Schimmel428b8512017-08-03 13:28:28 +02005137static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5138 struct mlxsw_sp_fib_node *fib_node)
5139{
5140 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5141
5142 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5143 common.list) {
5144 bool do_break = &tmp->common.list == &fib_node->entry_list;
5145
5146 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5147 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5148 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5149 if (do_break)
5150 break;
5151 }
5152}
5153
Ido Schimmel9aecce12017-02-09 10:28:42 +01005154static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5155 struct mlxsw_sp_fib_node *fib_node)
5156{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005157 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005158 case MLXSW_SP_L3_PROTO_IPV4:
5159 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5160 break;
5161 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005162 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005163 break;
5164 }
5165}
5166
Ido Schimmel76610eb2017-03-10 08:53:41 +01005167static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5168 struct mlxsw_sp_vr *vr,
5169 enum mlxsw_sp_l3proto proto)
5170{
5171 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5172 struct mlxsw_sp_fib_node *fib_node, *tmp;
5173
5174 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5175 bool do_break = &tmp->list == &fib->node_list;
5176
5177 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5178 if (do_break)
5179 break;
5180 }
5181}
5182
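/* Flush the unicast and multicast routes of every virtual router that
 * is currently in use.
 */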
Ido Schimmelac571de2016-11-14 11:26:32 +01005183static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005184{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005185 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005186
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005187 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005188 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005189
Ido Schimmel76610eb2017-03-10 08:53:41 +01005190 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005191 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005192
5193 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005194 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005195
5196 /* If virtual router was only used for IPv4, then it's no
5197 * longer used.
5198 */
5199 if (!mlxsw_sp_vr_is_used(vr))
5200 continue;
5201 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005202 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005203}
5204
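/* Give up on FIB offload: flush all offloaded routes and install
 * default routes that send packets to the CPU, so that forwarding is
 * resumed in software.
 */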
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005205static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005206{
5207 int err;
5208
Ido Schimmel9011b672017-05-16 19:38:25 +02005209 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005210 return;
5211 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005212 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005213 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005214 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5215 if (err)
5216 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5217}
5218
Ido Schimmel30572242016-12-03 16:45:01 +01005219struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005220 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005221 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005222 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005223 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005224 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005225 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005226 struct mfc_entry_notifier_info men_info;
5227 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005228 };
Ido Schimmel30572242016-12-03 16:45:01 +01005229 struct mlxsw_sp *mlxsw_sp;
5230 unsigned long event;
5231};
5232
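/* Process a deferred IPv4 FIB event under RTNL. Any failure to reflect
 * the event in the device triggers the FIB abort mechanism.
 */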
Ido Schimmel66a57632017-08-03 13:28:26 +02005233static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005234{
Ido Schimmel30572242016-12-03 16:45:01 +01005235 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005236 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005237 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005238 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005239 int err;
5240
Ido Schimmel30572242016-12-03 16:45:01 +01005241 /* Protect internal structures from changes */
5242 rtnl_lock();
5243 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005244 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005245 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005246 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005247 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005248 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5249 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005250 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005251 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005252 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005253 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005254 break;
5255 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005256 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5257 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005258 break;
David Ahern1f279232017-10-27 17:37:14 -07005259 case FIB_EVENT_RULE_ADD:
 5260		/* If we get here, a rule was added that we do not support,
 5261		 * so trigger the FIB abort mechanism.
5262 */
5263 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005264 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005265 case FIB_EVENT_NH_ADD: /* fall through */
5266 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005267 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5268 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005269 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5270 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005271 }
Ido Schimmel30572242016-12-03 16:45:01 +01005272 rtnl_unlock();
5273 kfree(fib_work);
5274}
5275
Ido Schimmel66a57632017-08-03 13:28:26 +02005276static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5277{
Ido Schimmel583419f2017-08-03 13:28:27 +02005278 struct mlxsw_sp_fib_event_work *fib_work =
5279 container_of(work, struct mlxsw_sp_fib_event_work, work);
5280 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005281 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005282 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005283
5284 rtnl_lock();
5285 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005286 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005287 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005288 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005289 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005290 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005291 if (err)
5292 mlxsw_sp_router_fib_abort(mlxsw_sp);
5293 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5294 break;
5295 case FIB_EVENT_ENTRY_DEL:
5296 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5297 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5298 break;
David Ahern1f279232017-10-27 17:37:14 -07005299 case FIB_EVENT_RULE_ADD:
 5300		/* If we get here, a rule was added that we do not support,
 5301		 * so trigger the FIB abort mechanism.
5302 */
5303 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005304 break;
5305 }
5306 rtnl_unlock();
5307 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005308}
5309
Yotam Gigid42b0962017-09-27 08:23:20 +02005310static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5311{
5312 struct mlxsw_sp_fib_event_work *fib_work =
5313 container_of(work, struct mlxsw_sp_fib_event_work, work);
5314 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005315 bool replace;
5316 int err;
5317
5318 rtnl_lock();
5319 switch (fib_work->event) {
5320 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5321 case FIB_EVENT_ENTRY_ADD:
5322 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5323
5324 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5325 replace);
5326 if (err)
5327 mlxsw_sp_router_fib_abort(mlxsw_sp);
5328 ipmr_cache_put(fib_work->men_info.mfc);
5329 break;
5330 case FIB_EVENT_ENTRY_DEL:
5331 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5332 ipmr_cache_put(fib_work->men_info.mfc);
5333 break;
5334 case FIB_EVENT_VIF_ADD:
5335 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5336 &fib_work->ven_info);
5337 if (err)
5338 mlxsw_sp_router_fib_abort(mlxsw_sp);
5339 dev_put(fib_work->ven_info.dev);
5340 break;
5341 case FIB_EVENT_VIF_DEL:
5342 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5343 &fib_work->ven_info);
5344 dev_put(fib_work->ven_info.dev);
5345 break;
David Ahern1f279232017-10-27 17:37:14 -07005346 case FIB_EVENT_RULE_ADD:
 5347		/* If we get here, a rule was added that we do not support,
 5348		 * so trigger the FIB abort mechanism.
5349 */
5350 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005351 break;
5352 }
5353 rtnl_unlock();
5354 kfree(fib_work);
5355}
5356
Ido Schimmel66a57632017-08-03 13:28:26 +02005357static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5358 struct fib_notifier_info *info)
5359{
David Ahern3c75f9b2017-10-18 15:01:38 -07005360 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005361 struct fib_nh_notifier_info *fnh_info;
5362
Ido Schimmel66a57632017-08-03 13:28:26 +02005363 switch (fib_work->event) {
5364 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5365 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5366 case FIB_EVENT_ENTRY_ADD: /* fall through */
5367 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005368 fen_info = container_of(info, struct fib_entry_notifier_info,
5369 info);
5370 fib_work->fen_info = *fen_info;
5371 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005372 * freed while work is queued. Release it afterwards.
5373 */
5374 fib_info_hold(fib_work->fen_info.fi);
5375 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005376 case FIB_EVENT_NH_ADD: /* fall through */
5377 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005378 fnh_info = container_of(info, struct fib_nh_notifier_info,
5379 info);
5380 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005381 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5382 break;
5383 }
5384}
5385
5386static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5387 struct fib_notifier_info *info)
5388{
David Ahern3c75f9b2017-10-18 15:01:38 -07005389 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005390
Ido Schimmel583419f2017-08-03 13:28:27 +02005391 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005392 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005393 case FIB_EVENT_ENTRY_ADD: /* fall through */
5394 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005395 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5396 info);
5397 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005398 rt6_hold(fib_work->fen6_info.rt);
5399 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005400 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005401}
5402
Yotam Gigid42b0962017-09-27 08:23:20 +02005403static void
5404mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5405 struct fib_notifier_info *info)
5406{
5407 switch (fib_work->event) {
5408 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5409 case FIB_EVENT_ENTRY_ADD: /* fall through */
5410 case FIB_EVENT_ENTRY_DEL:
5411 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5412 ipmr_cache_hold(fib_work->men_info.mfc);
5413 break;
5414 case FIB_EVENT_VIF_ADD: /* fall through */
5415 case FIB_EVENT_VIF_DEL:
5416 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5417 dev_hold(fib_work->ven_info.dev);
5418 break;
David Ahern1f279232017-10-27 17:37:14 -07005419 }
5420}
5421
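/* FIB rules cannot be offloaded. Only the default rules (and l3mdev
 * rules) are tolerated; any other rule causes the caller to abort FIB
 * offload.
 */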
5422static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5423 struct fib_notifier_info *info,
5424 struct mlxsw_sp *mlxsw_sp)
5425{
5426 struct netlink_ext_ack *extack = info->extack;
5427 struct fib_rule_notifier_info *fr_info;
5428 struct fib_rule *rule;
5429 int err = 0;
5430
 5431	/* Nothing to do for rule deletions at the moment. */
5432 if (event == FIB_EVENT_RULE_DEL)
5433 return 0;
5434
5435 if (mlxsw_sp->router->aborted)
5436 return 0;
5437
5438 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5439 rule = fr_info->rule;
5440
5441 switch (info->family) {
5442 case AF_INET:
5443 if (!fib4_rule_default(rule) && !rule->l3mdev)
5444 err = -1;
5445 break;
5446 case AF_INET6:
5447 if (!fib6_rule_default(rule) && !rule->l3mdev)
5448 err = -1;
5449 break;
5450 case RTNL_FAMILY_IPMR:
5451 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5452 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005453 break;
5454 }
David Ahern1f279232017-10-27 17:37:14 -07005455
5456 if (err < 0)
5457 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5458
5459 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005460}
5461
Ido Schimmel30572242016-12-03 16:45:01 +01005462/* Called with rcu_read_lock() */
5463static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5464 unsigned long event, void *ptr)
5465{
Ido Schimmel30572242016-12-03 16:45:01 +01005466 struct mlxsw_sp_fib_event_work *fib_work;
5467 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005468 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005469 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005470
Ido Schimmel8e29f972017-09-15 15:31:07 +02005471 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005472 (info->family != AF_INET && info->family != AF_INET6 &&
5473 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005474 return NOTIFY_DONE;
5475
David Ahern1f279232017-10-27 17:37:14 -07005476 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5477
5478 switch (event) {
5479 case FIB_EVENT_RULE_ADD: /* fall through */
5480 case FIB_EVENT_RULE_DEL:
5481 err = mlxsw_sp_router_fib_rule_event(event, info,
5482 router->mlxsw_sp);
5483 if (!err)
5484 return NOTIFY_DONE;
5485 }
5486
Ido Schimmel30572242016-12-03 16:45:01 +01005487 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5488 if (WARN_ON(!fib_work))
5489 return NOTIFY_BAD;
5490
Ido Schimmel7e39d112017-05-16 19:38:28 +02005491 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005492 fib_work->event = event;
5493
Ido Schimmel66a57632017-08-03 13:28:26 +02005494 switch (info->family) {
5495 case AF_INET:
5496 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5497 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005498 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005499 case AF_INET6:
5500 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5501 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005502 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005503 case RTNL_FAMILY_IPMR:
5504 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5505 mlxsw_sp_router_fibmr_event(fib_work, info);
5506 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005507 }
5508
Ido Schimmela0e47612017-02-06 16:20:10 +01005509 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005510
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005511 return NOTIFY_DONE;
5512}
5513
Ido Schimmel4724ba562017-03-10 08:53:39 +01005514static struct mlxsw_sp_rif *
5515mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5516 const struct net_device *dev)
5517{
5518 int i;
5519
5520 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005521 if (mlxsw_sp->router->rifs[i] &&
5522 mlxsw_sp->router->rifs[i]->dev == dev)
5523 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005524
5525 return NULL;
5526}
5527
5528static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5529{
5530 char ritr_pl[MLXSW_REG_RITR_LEN];
5531 int err;
5532
5533 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5534 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5535 if (WARN_ON_ONCE(err))
5536 return err;
5537
5538 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5539 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5540}
5541
5542static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005543 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005544{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005545 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5546 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5547 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005548}
5549
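/* Decide whether the address event should change the RIF configuration:
 * on NETDEV_UP, a RIF is needed if one does not already exist; on
 * NETDEV_DOWN, the RIF is removed only once the last IPv4 and IPv6
 * addresses are gone and the netdev is not an L3 master's slave.
 */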
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005550static bool
5551mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5552 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005553{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005554 struct inet6_dev *inet6_dev;
5555 bool addr_list_empty = true;
5556 struct in_device *idev;
5557
Ido Schimmel4724ba562017-03-10 08:53:39 +01005558 switch (event) {
5559 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005560 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005561 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005562 idev = __in_dev_get_rtnl(dev);
5563 if (idev && idev->ifa_list)
5564 addr_list_empty = false;
5565
5566 inet6_dev = __in6_dev_get(dev);
5567 if (addr_list_empty && inet6_dev &&
5568 !list_empty(&inet6_dev->addr_list))
5569 addr_list_empty = false;
5570
5571 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005572 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005573 return true;
5574 /* It is possible we already removed the RIF ourselves
5575 * if it was assigned to a netdev that is now a bridge
5576 * or LAG slave.
5577 */
5578 return false;
5579 }
5580
5581 return false;
5582}
5583
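/* Derive the RIF type for @dev: IP-in-IP netdevs get a loopback RIF, while
 * Ethernet-side netdevs map through their FID type (802.1Q for VLAN uppers
 * of bridges and for VLAN-aware bridges, 802.1D for VLAN-unaware bridges,
 * rFID otherwise).
 */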
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005584static enum mlxsw_sp_rif_type
5585mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5586 const struct net_device *dev)
5587{
5588 enum mlxsw_sp_fid_type type;
5589
Petr Machata6ddb7422017-09-02 23:49:19 +02005590 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5591 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5592
5593 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005594 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5595 type = MLXSW_SP_FID_TYPE_8021Q;
5596 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5597 type = MLXSW_SP_FID_TYPE_8021Q;
5598 else if (netif_is_bridge_master(dev))
5599 type = MLXSW_SP_FID_TYPE_8021D;
5600 else
5601 type = MLXSW_SP_FID_TYPE_RFID;
5602
5603 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5604}
5605
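/* Allocate a free RIF index by scanning for the first unused slot; fails
 * with -ENOBUFS when all MAX_RIFS entries are in use.
 */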
Ido Schimmelde5ed992017-06-04 16:53:40 +02005606static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005607{
5608 int i;
5609
Ido Schimmelde5ed992017-06-04 16:53:40 +02005610 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5611 if (!mlxsw_sp->router->rifs[i]) {
5612 *p_rif_index = i;
5613 return 0;
5614 }
5615 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005616
Ido Schimmelde5ed992017-06-04 16:53:40 +02005617 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005618}
5619
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005620static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5621 u16 vr_id,
5622 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005623{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005624 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005625
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005626 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005627 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005628 return NULL;
5629
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005630 INIT_LIST_HEAD(&rif->nexthop_list);
5631 INIT_LIST_HEAD(&rif->neigh_list);
5632 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5633 rif->mtu = l3_dev->mtu;
5634 rif->vr_id = vr_id;
5635 rif->dev = l3_dev;
5636 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005637
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005638 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005639}
5640
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005641struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5642 u16 rif_index)
5643{
5644 return mlxsw_sp->router->rifs[rif_index];
5645}
5646
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005647u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5648{
5649 return rif->rif_index;
5650}
5651
Petr Machata92107cf2017-09-02 23:49:28 +02005652u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5653{
5654 return lb_rif->common.rif_index;
5655}
5656
5657u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5658{
5659 return lb_rif->ul_vr_id;
5660}
5661
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005662int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5663{
5664 return rif->dev->ifindex;
5665}
5666
Yotam Gigi91e4d592017-09-19 10:00:19 +02005667const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5668{
5669 return rif->dev;
5670}
5671
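/* Create a RIF for @params->dev: bind a virtual router, allocate a RIF index
 * and the type-specific RIF structure, take a FID reference where the RIF
 * type uses one, program the device via the per-type ops and register the
 * RIF with the multicast router.
 */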
Ido Schimmel4724ba562017-03-10 08:53:39 +01005672static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005673mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005674 const struct mlxsw_sp_rif_params *params,
5675 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005676{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005677 u32 tb_id = l3mdev_fib_table(params->dev);
5678 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005679 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005680 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005681 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005682 struct mlxsw_sp_vr *vr;
5683 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005684 int err;
5685
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005686 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5687 ops = mlxsw_sp->router->rif_ops_arr[type];
5688
David Ahernf8fa9b42017-10-18 09:56:56 -07005689 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005690 if (IS_ERR(vr))
5691 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005692 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005693
Ido Schimmelde5ed992017-06-04 16:53:40 +02005694 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005695 if (err) {
5696 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005697 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005698 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005699
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005700 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005701 if (!rif) {
5702 err = -ENOMEM;
5703 goto err_rif_alloc;
5704 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005705 rif->mlxsw_sp = mlxsw_sp;
5706 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005707
Petr Machata010cadf2017-09-02 23:49:18 +02005708 if (ops->fid_get) {
5709 fid = ops->fid_get(rif);
5710 if (IS_ERR(fid)) {
5711 err = PTR_ERR(fid);
5712 goto err_fid_get;
5713 }
5714 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005715 }
5716
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005717 if (ops->setup)
5718 ops->setup(rif, params);
5719
5720 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005721 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005722 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005723
Yotam Gigid42b0962017-09-27 08:23:20 +02005724 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5725 if (err)
5726 goto err_mr_rif_add;
5727
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005728 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005729 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005730
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005731 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005732
Yotam Gigid42b0962017-09-27 08:23:20 +02005733err_mr_rif_add:
5734 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005735err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005736 if (fid)
5737 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005738err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005739 kfree(rif);
5740err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005741err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005742 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005743 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005744 return ERR_PTR(err);
5745}
5746
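/* Tear down a RIF in the reverse order of mlxsw_sp_rif_create(): sync away
 * nexthops and neighbours, unregister from the multicast router, deconfigure
 * the device and release the FID and virtual router references.
 */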
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005747void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005748{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005749 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5750 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005751 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005752 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005753
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005754 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005755 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005756
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005757 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005758 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005759 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005760 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005761 if (fid)
5762 /* Loopback RIFs are not associated with a FID. */
5763 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005764 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005765 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005766 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005767}
5768
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005769static void
5770mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5771 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5772{
5773 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5774
5775 params->vid = mlxsw_sp_port_vlan->vid;
5776 params->lag = mlxsw_sp_port->lagged;
5777 if (params->lag)
5778 params->lag_id = mlxsw_sp_port->lag_id;
5779 else
5780 params->system_port = mlxsw_sp_port->local_port;
5781}
5782
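/* Make a {port, VID} pair a router port: create a sub-port RIF for @l3_dev
 * if none exists, take a reference on its rFID and map the {port, VID} to
 * it, then disable learning and set the VID to forwarding, since traffic is
 * now routed rather than bridged.
 */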
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005783static int
Ido Schimmela1107482017-05-26 08:37:39 +02005784mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005785 struct net_device *l3_dev,
5786 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005787{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005788 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005789 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005790 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005791 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005792 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005793 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005794
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005795 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005796 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005797 struct mlxsw_sp_rif_params params = {
5798 .dev = l3_dev,
5799 };
5800
5801 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005802 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005803 if (IS_ERR(rif))
5804 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005805 }
5806
Ido Schimmela1107482017-05-26 08:37:39 +02005807 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005808 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005809 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5810 if (err)
5811 goto err_fid_port_vid_map;
5812
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005813 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005814 if (err)
5815 goto err_port_vid_learning_set;
5816
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005817 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005818 BR_STATE_FORWARDING);
5819 if (err)
5820 goto err_port_vid_stp_set;
5821
Ido Schimmela1107482017-05-26 08:37:39 +02005822 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005823
Ido Schimmel4724ba562017-03-10 08:53:39 +01005824 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005825
5826err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005827 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005828err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005829 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5830err_fid_port_vid_map:
5831 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005832 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005833}
5834
Ido Schimmela1107482017-05-26 08:37:39 +02005835void
5836mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005837{
Ido Schimmelce95e152017-05-26 08:37:27 +02005838 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005839 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005840 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005841
Ido Schimmela1107482017-05-26 08:37:39 +02005842 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5843 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005844
Ido Schimmela1107482017-05-26 08:37:39 +02005845 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005846 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5847 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005848 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5849 /* If router port holds the last reference on the rFID, then the
5850 * associated Sub-port RIF will be destroyed.
5851 */
5852 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005853}
5854
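/* Handle an address event for a {port netdev, VID} pair: join the router on
 * NETDEV_UP and leave it on NETDEV_DOWN.
 */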
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005855static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5856 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005857 unsigned long event, u16 vid,
5858 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005859{
5860 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005861 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005862
Ido Schimmelce95e152017-05-26 08:37:27 +02005863 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005864 if (WARN_ON(!mlxsw_sp_port_vlan))
5865 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005866
5867 switch (event) {
5868 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005869 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005870 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005871 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005872 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005873 break;
5874 }
5875
5876 return 0;
5877}
5878
5879static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005880 unsigned long event,
5881 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005882{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005883 if (netif_is_bridge_port(port_dev) ||
5884 netif_is_lag_port(port_dev) ||
5885 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005886 return 0;
5887
David Ahernf8fa9b42017-10-18 09:56:56 -07005888 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5889 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005890}
5891
5892static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5893 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005894 unsigned long event, u16 vid,
5895 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005896{
5897 struct net_device *port_dev;
5898 struct list_head *iter;
5899 int err;
5900
5901 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5902 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005903 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5904 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005905 event, vid,
5906 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005907 if (err)
5908 return err;
5909 }
5910 }
5911
5912 return 0;
5913}
5914
5915static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005916 unsigned long event,
5917 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005918{
5919 if (netif_is_bridge_port(lag_dev))
5920 return 0;
5921
David Ahernf8fa9b42017-10-18 09:56:56 -07005922 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5923 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005924}
5925
Ido Schimmel4724ba562017-03-10 08:53:39 +01005926static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005927 unsigned long event,
5928 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005929{
5930 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005931 struct mlxsw_sp_rif_params params = {
5932 .dev = l3_dev,
5933 };
Ido Schimmela1107482017-05-26 08:37:39 +02005934 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005935
5936 switch (event) {
5937 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07005938 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005939 if (IS_ERR(rif))
5940 return PTR_ERR(rif);
5941 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005942 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005943 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005944 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005945 break;
5946 }
5947
5948 return 0;
5949}
5950
5951static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005952 unsigned long event,
5953 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005954{
5955 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005956 u16 vid = vlan_dev_vlan_id(vlan_dev);
5957
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03005958 if (netif_is_bridge_port(vlan_dev))
5959 return 0;
5960
Ido Schimmel4724ba562017-03-10 08:53:39 +01005961 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005962 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005963 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005964 else if (netif_is_lag_master(real_dev))
5965 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07005966 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02005967 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005968 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005969
5970 return 0;
5971}
5972
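/* Dispatch an address event according to the netdev type: physical port,
 * LAG, bridge or VLAN upper. Other netdevs are silently ignored.
 */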
Ido Schimmelb1e45522017-04-30 19:47:14 +03005973static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005974 unsigned long event,
5975 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03005976{
5977 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005978 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005979 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005980 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005981 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005982 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005983 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005984 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03005985 else
5986 return 0;
5987}
5988
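/* IPv4 address notifier. NETDEV_UP is handled by the address validator
 * notifier below, which also has access to extack for error reporting, so
 * only NETDEV_DOWN is processed here.
 */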
Ido Schimmel4724ba562017-03-10 08:53:39 +01005989int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
5990 unsigned long event, void *ptr)
5991{
5992 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
5993 struct net_device *dev = ifa->ifa_dev->dev;
5994 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005995 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005996 int err = 0;
5997
David Ahern89d5dd22017-10-18 09:56:55 -07005998 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
5999 if (event == NETDEV_UP)
6000 goto out;
6001
6002 mlxsw_sp = mlxsw_sp_lower_get(dev);
6003 if (!mlxsw_sp)
6004 goto out;
6005
6006 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6007 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6008 goto out;
6009
David Ahernf8fa9b42017-10-18 09:56:56 -07006010 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006011out:
6012 return notifier_from_errno(err);
6013}
6014
6015int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6016 unsigned long event, void *ptr)
6017{
6018 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6019 struct net_device *dev = ivi->ivi_dev->dev;
6020 struct mlxsw_sp *mlxsw_sp;
6021 struct mlxsw_sp_rif *rif;
6022 int err = 0;
6023
Ido Schimmel4724ba562017-03-10 08:53:39 +01006024 mlxsw_sp = mlxsw_sp_lower_get(dev);
6025 if (!mlxsw_sp)
6026 goto out;
6027
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006028 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006029 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006030 goto out;
6031
David Ahernf8fa9b42017-10-18 09:56:56 -07006032 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006033out:
6034 return notifier_from_errno(err);
6035}
6036
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006037struct mlxsw_sp_inet6addr_event_work {
6038 struct work_struct work;
6039 struct net_device *dev;
6040 unsigned long event;
6041};
6042
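/* IPv6 address events are delivered in atomic context, so the actual RIF
 * handling is deferred to this work item, where RTNL can be taken.
 */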
6043static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6044{
6045 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6046 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6047 struct net_device *dev = inet6addr_work->dev;
6048 unsigned long event = inet6addr_work->event;
6049 struct mlxsw_sp *mlxsw_sp;
6050 struct mlxsw_sp_rif *rif;
6051
6052 rtnl_lock();
6053 mlxsw_sp = mlxsw_sp_lower_get(dev);
6054 if (!mlxsw_sp)
6055 goto out;
6056
6057 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6058 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6059 goto out;
6060
David Ahernf8fa9b42017-10-18 09:56:56 -07006061 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006062out:
6063 rtnl_unlock();
6064 dev_put(dev);
6065 kfree(inet6addr_work);
6066}
6067
6068/* Called with rcu_read_lock() */
6069int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6070 unsigned long event, void *ptr)
6071{
6072 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6073 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6074 struct net_device *dev = if6->idev->dev;
6075
David Ahern89d5dd22017-10-18 09:56:55 -07006076 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6077 if (event == NETDEV_UP)
6078 return NOTIFY_DONE;
6079
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006080 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6081 return NOTIFY_DONE;
6082
6083 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6084 if (!inet6addr_work)
6085 return NOTIFY_BAD;
6086
6087 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6088 inet6addr_work->dev = dev;
6089 inet6addr_work->event = event;
6090 dev_hold(dev);
6091 mlxsw_core_schedule_work(&inet6addr_work->work);
6092
6093 return NOTIFY_DONE;
6094}
6095
David Ahern89d5dd22017-10-18 09:56:55 -07006096int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6097 unsigned long event, void *ptr)
6098{
6099 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6100 struct net_device *dev = i6vi->i6vi_dev->dev;
6101 struct mlxsw_sp *mlxsw_sp;
6102 struct mlxsw_sp_rif *rif;
6103 int err = 0;
6104
6105 mlxsw_sp = mlxsw_sp_lower_get(dev);
6106 if (!mlxsw_sp)
6107 goto out;
6108
6109 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6110 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6111 goto out;
6112
David Ahernf8fa9b42017-10-18 09:56:56 -07006113 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006114out:
6115 return notifier_from_errno(err);
6116}
6117
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006118static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006119 const char *mac, int mtu)
6120{
6121 char ritr_pl[MLXSW_REG_RITR_LEN];
6122 int err;
6123
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006124 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006125 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6126 if (err)
6127 return err;
6128
6129 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6130 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6131 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6132 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6133}
6134
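/* Called when the MAC address or MTU of a netdev backing a RIF changes:
 * remove the old router FDB entry, re-create the RIF entry with the new
 * parameters, install a new FDB entry and propagate an MTU change to the
 * multicast routing table.
 */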
6135int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6136{
6137 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006138 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006139 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006140 int err;
6141
6142 mlxsw_sp = mlxsw_sp_lower_get(dev);
6143 if (!mlxsw_sp)
6144 return 0;
6145
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006146 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6147 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006148 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006149 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006150
Ido Schimmela1107482017-05-26 08:37:39 +02006151 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006152 if (err)
6153 return err;
6154
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006155 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6156 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006157 if (err)
6158 goto err_rif_edit;
6159
Ido Schimmela1107482017-05-26 08:37:39 +02006160 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006161 if (err)
6162 goto err_rif_fdb_op;
6163
Yotam Gigifd890fe2017-09-27 08:23:21 +02006164 if (rif->mtu != dev->mtu) {
6165 struct mlxsw_sp_vr *vr;
6166
 6167		/* The RIF is relevant only to its mr_table instance, as, unlike
 6168		 * in unicast routing, in multicast routing a RIF cannot be shared
 6169		 * between several multicast routing tables.
 6170		 */
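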
6171 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6172 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6173 }
6174
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006175 ether_addr_copy(rif->addr, dev->dev_addr);
6176 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006177
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006178 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006179
6180 return 0;
6181
6182err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006183 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006184err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006185 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006186 return err;
6187}
6188
Ido Schimmelb1e45522017-04-30 19:47:14 +03006189static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006190 struct net_device *l3_dev,
6191 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006192{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006193 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006194
Ido Schimmelb1e45522017-04-30 19:47:14 +03006195	/* If the netdev is already associated with a RIF, then we need to
6196 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006197 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006198 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6199 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006200 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006201
David Ahernf8fa9b42017-10-18 09:56:56 -07006202 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006203}
6204
Ido Schimmelb1e45522017-04-30 19:47:14 +03006205static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6206 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006207{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006208 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006209
Ido Schimmelb1e45522017-04-30 19:47:14 +03006210 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6211 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006212 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006213 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006214}
6215
Ido Schimmelb1e45522017-04-30 19:47:14 +03006216int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6217 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006218{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006219 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6220 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006221
Ido Schimmelb1e45522017-04-30 19:47:14 +03006222 if (!mlxsw_sp)
6223 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006224
Ido Schimmelb1e45522017-04-30 19:47:14 +03006225 switch (event) {
6226 case NETDEV_PRECHANGEUPPER:
6227 return 0;
6228 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006229 if (info->linking) {
6230 struct netlink_ext_ack *extack;
6231
6232 extack = netdev_notifier_info_to_extack(&info->info);
6233 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6234 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006235 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006236 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006237 break;
6238 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006239
Ido Schimmelb1e45522017-04-30 19:47:14 +03006240 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006241}
6242
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006243static struct mlxsw_sp_rif_subport *
6244mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006245{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006246 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006247}
6248
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006249static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6250 const struct mlxsw_sp_rif_params *params)
6251{
6252 struct mlxsw_sp_rif_subport *rif_subport;
6253
6254 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6255 rif_subport->vid = params->vid;
6256 rif_subport->lag = params->lag;
6257 if (params->lag)
6258 rif_subport->lag_id = params->lag_id;
6259 else
6260 rif_subport->system_port = params->system_port;
6261}
6262
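/* Program the RITR register for a sub-port RIF, keyed by either {LAG ID, VID}
 * or {system port, VID}, depending on whether the underlying port is lagged.
 */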
6263static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6264{
6265 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6266 struct mlxsw_sp_rif_subport *rif_subport;
6267 char ritr_pl[MLXSW_REG_RITR_LEN];
6268
6269 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6270 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006271 rif->rif_index, rif->vr_id, rif->dev->mtu);
6272 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006273 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6274 rif_subport->lag ? rif_subport->lag_id :
6275 rif_subport->system_port,
6276 rif_subport->vid);
6277
6278 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6279}
6280
6281static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6282{
Petr Machata010cadf2017-09-02 23:49:18 +02006283 int err;
6284
6285 err = mlxsw_sp_rif_subport_op(rif, true);
6286 if (err)
6287 return err;
6288
6289 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6290 mlxsw_sp_fid_index(rif->fid), true);
6291 if (err)
6292 goto err_rif_fdb_op;
6293
6294 mlxsw_sp_fid_rif_set(rif->fid, rif);
6295 return 0;
6296
6297err_rif_fdb_op:
6298 mlxsw_sp_rif_subport_op(rif, false);
6299 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006300}
6301
6302static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6303{
Petr Machata010cadf2017-09-02 23:49:18 +02006304 struct mlxsw_sp_fid *fid = rif->fid;
6305
6306 mlxsw_sp_fid_rif_set(fid, NULL);
6307 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6308 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006309 mlxsw_sp_rif_subport_op(rif, false);
6310}
6311
6312static struct mlxsw_sp_fid *
6313mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6314{
6315 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6316}
6317
6318static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6319 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6320 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6321 .setup = mlxsw_sp_rif_subport_setup,
6322 .configure = mlxsw_sp_rif_subport_configure,
6323 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6324 .fid_get = mlxsw_sp_rif_subport_fid_get,
6325};
6326
6327static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6328 enum mlxsw_reg_ritr_if_type type,
6329 u16 vid_fid, bool enable)
6330{
6331 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6332 char ritr_pl[MLXSW_REG_RITR_LEN];
6333
6334 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006335 rif->dev->mtu);
6336 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006337 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6338
6339 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6340}
6341
Yotam Gigib35750f2017-10-09 11:15:33 +02006342u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006343{
6344 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6345}
6346
6347static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6348{
6349 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6350 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6351 int err;
6352
6353 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6354 if (err)
6355 return err;
6356
Ido Schimmel0d284812017-07-18 10:10:12 +02006357 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6358 mlxsw_sp_router_port(mlxsw_sp), true);
6359 if (err)
6360 goto err_fid_mc_flood_set;
6361
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006362 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6363 mlxsw_sp_router_port(mlxsw_sp), true);
6364 if (err)
6365 goto err_fid_bc_flood_set;
6366
Petr Machata010cadf2017-09-02 23:49:18 +02006367 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6368 mlxsw_sp_fid_index(rif->fid), true);
6369 if (err)
6370 goto err_rif_fdb_op;
6371
6372 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006373 return 0;
6374
Petr Machata010cadf2017-09-02 23:49:18 +02006375err_rif_fdb_op:
6376 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6377 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006378err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006379 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6380 mlxsw_sp_router_port(mlxsw_sp), false);
6381err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006382 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6383 return err;
6384}
6385
6386static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6387{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006388 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006389 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6390 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006391
Petr Machata010cadf2017-09-02 23:49:18 +02006392 mlxsw_sp_fid_rif_set(fid, NULL);
6393 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6394 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006395 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6396 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006397 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6398 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006399 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6400}
6401
6402static struct mlxsw_sp_fid *
6403mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6404{
6405 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6406
6407 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6408}
6409
6410static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6411 .type = MLXSW_SP_RIF_TYPE_VLAN,
6412 .rif_size = sizeof(struct mlxsw_sp_rif),
6413 .configure = mlxsw_sp_rif_vlan_configure,
6414 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6415 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6416};
6417
6418static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6419{
6420 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6421 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6422 int err;
6423
6424 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6425 true);
6426 if (err)
6427 return err;
6428
Ido Schimmel0d284812017-07-18 10:10:12 +02006429 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6430 mlxsw_sp_router_port(mlxsw_sp), true);
6431 if (err)
6432 goto err_fid_mc_flood_set;
6433
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006434 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6435 mlxsw_sp_router_port(mlxsw_sp), true);
6436 if (err)
6437 goto err_fid_bc_flood_set;
6438
Petr Machata010cadf2017-09-02 23:49:18 +02006439 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6440 mlxsw_sp_fid_index(rif->fid), true);
6441 if (err)
6442 goto err_rif_fdb_op;
6443
6444 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006445 return 0;
6446
Petr Machata010cadf2017-09-02 23:49:18 +02006447err_rif_fdb_op:
6448 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6449 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006450err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006451 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6452 mlxsw_sp_router_port(mlxsw_sp), false);
6453err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006454 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6455 return err;
6456}
6457
6458static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6459{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006460 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006461 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6462 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006463
Petr Machata010cadf2017-09-02 23:49:18 +02006464 mlxsw_sp_fid_rif_set(fid, NULL);
6465 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6466 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006467 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6468 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006469 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6470 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006471 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6472}
6473
6474static struct mlxsw_sp_fid *
6475mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6476{
6477 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6478}
6479
6480static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6481 .type = MLXSW_SP_RIF_TYPE_FID,
6482 .rif_size = sizeof(struct mlxsw_sp_rif),
6483 .configure = mlxsw_sp_rif_fid_configure,
6484 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6485 .fid_get = mlxsw_sp_rif_fid_fid_get,
6486};
6487
Petr Machata6ddb7422017-09-02 23:49:19 +02006488static struct mlxsw_sp_rif_ipip_lb *
6489mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6490{
6491 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6492}
6493
6494static void
6495mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6496 const struct mlxsw_sp_rif_params *params)
6497{
6498 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6499 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6500
6501 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6502 common);
6503 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6504 rif_lb->lb_config = params_lb->lb_config;
6505}
6506
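/* Program the loopback RIF that terminates an IP-in-IP tunnel in the
 * underlay virtual router. Only an IPv4 underlay is currently supported;
 * an IPv6 underlay yields -EAFNOSUPPORT.
 */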
6507static int
6508mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6509 struct mlxsw_sp_vr *ul_vr, bool enable)
6510{
6511 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6512 struct mlxsw_sp_rif *rif = &lb_rif->common;
6513 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6514 char ritr_pl[MLXSW_REG_RITR_LEN];
6515 u32 saddr4;
6516
6517 switch (lb_cf.ul_protocol) {
6518 case MLXSW_SP_L3_PROTO_IPV4:
6519 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6520 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6521 rif->rif_index, rif->vr_id, rif->dev->mtu);
6522 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6523 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6524 ul_vr->id, saddr4, lb_cf.okey);
6525 break;
6526
6527 case MLXSW_SP_L3_PROTO_IPV6:
6528 return -EAFNOSUPPORT;
6529 }
6530
6531 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6532}
6533
6534static int
6535mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6536{
6537 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6538 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6539 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6540 struct mlxsw_sp_vr *ul_vr;
6541 int err;
6542
David Ahernf8fa9b42017-10-18 09:56:56 -07006543 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006544 if (IS_ERR(ul_vr))
6545 return PTR_ERR(ul_vr);
6546
6547 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6548 if (err)
6549 goto err_loopback_op;
6550
6551 lb_rif->ul_vr_id = ul_vr->id;
6552 ++ul_vr->rif_count;
6553 return 0;
6554
6555err_loopback_op:
6556 mlxsw_sp_vr_put(ul_vr);
6557 return err;
6558}
6559
6560static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6561{
6562 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6563 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6564 struct mlxsw_sp_vr *ul_vr;
6565
6566 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6567 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6568
6569 --ul_vr->rif_count;
6570 mlxsw_sp_vr_put(ul_vr);
6571}
6572
6573static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6574 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6575 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6576 .setup = mlxsw_sp_rif_ipip_lb_setup,
6577 .configure = mlxsw_sp_rif_ipip_lb_configure,
6578 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6579};
6580
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006581static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6582 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6583 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6584 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006585 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006586};
6587
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006588static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6589{
6590 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6591
6592 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6593 sizeof(struct mlxsw_sp_rif *),
6594 GFP_KERNEL);
6595 if (!mlxsw_sp->router->rifs)
6596 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006597
6598 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6599
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006600 return 0;
6601}
6602
6603static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6604{
6605 int i;
6606
6607 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6608 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6609
6610 kfree(mlxsw_sp->router->rifs);
6611}
6612
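/* Write the global IP-in-IP tunneling configuration (TIGCR) once at init
 * time, before any tunnels are offloaded.
 */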
Petr Machatadcbda282017-10-20 09:16:16 +02006613static int
6614mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6615{
6616 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6617
6618 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6619 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6620}
6621
Petr Machata38ebc0f2017-09-02 23:49:17 +02006622static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6623{
6624 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006625 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006626 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006627}
6628
6629static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6630{
Petr Machata1012b9a2017-09-02 23:49:23 +02006631 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006632}
6633
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006634static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6635{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006636 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006637
6638 /* Flush pending FIB notifications and then flush the device's
6639 * table before requesting another dump. The FIB notification
6640 * block is unregistered, so no need to take RTNL.
6641 */
6642 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006643 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6644 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006645}
6646
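/* Enable the device router via RGCR and set the maximum number of router
 * interfaces; __mlxsw_sp_router_fini() disables it again on teardown.
 */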
Ido Schimmel4724ba562017-03-10 08:53:39 +01006647static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6648{
6649 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6650 u64 max_rifs;
6651 int err;
6652
6653 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6654 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006655 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006656
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006657 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006658 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6659 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6660 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006661 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006662 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006663}
6664
6665static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6666{
6667 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006668
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006669 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006670 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006671}
6672
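/* Top-level router initialization: allocate the router context and bring up
 * the sub-blocks (RIFs, IP-in-IP, nexthop hash tables, LPM trees, multicast
 * routing, virtual routers and neighbour handling) before registering the
 * netevent and FIB notifiers. mlxsw_sp_router_fini() undoes the steps in
 * reverse order.
 */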
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006673int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6674{
Ido Schimmel9011b672017-05-16 19:38:25 +02006675 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006676 int err;
6677
Ido Schimmel9011b672017-05-16 19:38:25 +02006678 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6679 if (!router)
6680 return -ENOMEM;
6681 mlxsw_sp->router = router;
6682 router->mlxsw_sp = mlxsw_sp;
6683
6684 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006685 err = __mlxsw_sp_router_init(mlxsw_sp);
6686 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006687 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006688
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006689 err = mlxsw_sp_rifs_init(mlxsw_sp);
6690 if (err)
6691 goto err_rifs_init;
6692
Petr Machata38ebc0f2017-09-02 23:49:17 +02006693 err = mlxsw_sp_ipips_init(mlxsw_sp);
6694 if (err)
6695 goto err_ipips_init;
6696
Ido Schimmel9011b672017-05-16 19:38:25 +02006697 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006698 &mlxsw_sp_nexthop_ht_params);
6699 if (err)
6700 goto err_nexthop_ht_init;
6701
Ido Schimmel9011b672017-05-16 19:38:25 +02006702 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006703 &mlxsw_sp_nexthop_group_ht_params);
6704 if (err)
6705 goto err_nexthop_group_ht_init;
6706
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006707 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006708 err = mlxsw_sp_lpm_init(mlxsw_sp);
6709 if (err)
6710 goto err_lpm_init;
6711
Yotam Gigid42b0962017-09-27 08:23:20 +02006712 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6713 if (err)
6714 goto err_mr_init;
6715
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006716 err = mlxsw_sp_vrs_init(mlxsw_sp);
6717 if (err)
6718 goto err_vrs_init;
6719
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006720 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006721 if (err)
6722 goto err_neigh_init;
6723
Ido Schimmel48fac882017-11-02 17:14:06 +01006724 mlxsw_sp->router->netevent_nb.notifier_call =
6725 mlxsw_sp_router_netevent_event;
6726 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6727 if (err)
6728 goto err_register_netevent_notifier;
6729
Ido Schimmel7e39d112017-05-16 19:38:28 +02006730 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6731 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006732 mlxsw_sp_router_fib_dump_flush);
6733 if (err)
6734 goto err_register_fib_notifier;
6735
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006736 return 0;
6737
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006738err_register_fib_notifier:
Ido Schimmel48fac882017-11-02 17:14:06 +01006739 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6740err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006741 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006742err_neigh_init:
6743 mlxsw_sp_vrs_fini(mlxsw_sp);
6744err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02006745 mlxsw_sp_mr_fini(mlxsw_sp);
6746err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01006747 mlxsw_sp_lpm_fini(mlxsw_sp);
6748err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006749 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006750err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006751 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006752err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02006753 mlxsw_sp_ipips_fini(mlxsw_sp);
6754err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006755 mlxsw_sp_rifs_fini(mlxsw_sp);
6756err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006757 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006758err_router_init:
6759 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006760 return err;
6761}
6762
6763void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6764{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006765 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01006766 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006767 mlxsw_sp_neigh_fini(mlxsw_sp);
6768 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02006769 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006770 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006771 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
6772 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006773 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006774 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006775 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006776 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006777}