/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

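/* Routing offload for Spectrum: router interfaces (RIFs) and their counters,
 * virtual routers bound to kernel FIB tables, LPM trees, FIB entries and next
 * hop groups, and IP-in-IP tunnel decap. (Descriptive overview only; the
 * structures and helpers below are the authoritative reference.)
 */
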
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

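/* A router interface (RIF): the L3 representation of a netdevice in the
 * device. It carries the lists of next hops and neighbour entries resolved
 * through it, the backing FID, and optional per-direction packet counters.
 */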
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

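/* RIF counter helpers. A rough usage sketch (illustrative only, error
 * handling elided):
 *
 *	u64 cnt;
 *
 *	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *	mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *				       MLXSW_SP_RIF_COUNTER_EGRESS, &cnt);
 *	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *
 * The value read is the good unicast packet count reported by the RICNT
 * register.
 */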
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

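/* The helpers below operate on the bitmap of prefix lengths used by a FIB
 * (0..128, hence the "+ 1" above). That bitmap is what the LPM tree layout is
 * derived from; see mlxsw_sp_lpm_tree_left_struct_set().
 */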
static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

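/* A virtual router: the device-side counterpart of a kernel FIB table
 * (tb_id). It owns the IPv4 and IPv6 unicast FIBs and the IPv4 multicast
 * routing table for that table ID.
 */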
struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

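/* Note: RT_TABLE_LOCAL (255) and RT_TABLE_DEFAULT (253) are folded into
 * RT_TABLE_MAIN (254), so those three kernel tables share a single virtual
 * router.
 */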
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

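/* When a FIB's prefix usage changes it may need to move to a different LPM
 * tree. The helpers below rebind every virtual router that uses the old tree
 * to the new one, rolling the bindings back if any rebind fails.
 */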
static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err) {
		mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
		fib->lpm_tree = NULL;
		return err;
	}
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

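/* IP-in-IP offload. "ol" (overlay) refers to the tunnel netdevice itself;
 * "ul" (underlay) refers to the netdevice and table carrying the encapsulated
 * packets, found through the tunnel's bound device (tun->parms.link).
 */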
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

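/* Netdevice event handlers for tunnel overlay devices: registration creates
 * an IPIP entry when the tunnel can be offloaded, unregistration destroys it,
 * and up/down events promote or demote the matching decap route.
 */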
Petr Machata796ec772017-11-03 10:03:29 +01001296static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1297 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001298{
Petr Machata00635872017-10-16 16:26:37 +02001299 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001300 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001301 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001302 union mlxsw_sp_l3addr saddr;
1303 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001304
1305 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001306 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001307 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1308 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1309 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1310 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1311 saddr, ul_tb_id,
1312 NULL)) {
1313 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1314 ol_dev);
1315 if (IS_ERR(ipip_entry))
1316 return PTR_ERR(ipip_entry);
1317 }
Petr Machata00635872017-10-16 16:26:37 +02001318 }
1319
1320 return 0;
1321}
1322
Petr Machata796ec772017-11-03 10:03:29 +01001323static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1324 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001325{
1326 struct mlxsw_sp_ipip_entry *ipip_entry;
1327
1328 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1329 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001330 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001331}
1332
Petr Machata47518ca2017-11-03 10:03:35 +01001333static void
1334mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1335 struct mlxsw_sp_ipip_entry *ipip_entry)
1336{
1337 struct mlxsw_sp_fib_entry *decap_fib_entry;
1338
1339 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1340 if (decap_fib_entry)
1341 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1342 decap_fib_entry);
1343}
1344
Petr Machata6d4de442017-11-03 10:03:34 +01001345static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1346 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001347{
Petr Machata00635872017-10-16 16:26:37 +02001348 struct mlxsw_sp_ipip_entry *ipip_entry;
1349
1350 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001351 if (ipip_entry)
1352 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001353}
1354
Petr Machataa3fe1982017-11-03 10:03:33 +01001355static void
1356mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1357 struct mlxsw_sp_ipip_entry *ipip_entry)
1358{
1359 if (ipip_entry->decap_fib_entry)
1360 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1361}
1362
Petr Machata796ec772017-11-03 10:03:29 +01001363static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1364 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001365{
1366 struct mlxsw_sp_ipip_entry *ipip_entry;
1367
1368 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001369 if (ipip_entry)
1370 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001371}
1372
Petr Machata09dbf622017-11-28 13:17:14 +01001373static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1374 struct mlxsw_sp_rif *old_rif,
1375 struct mlxsw_sp_rif *new_rif);
Petr Machata65a61212017-11-03 10:03:37 +01001376static int
1377mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1378 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001379 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001380 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001381{
Petr Machata65a61212017-11-03 10:03:37 +01001382 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1383 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001384
Petr Machata65a61212017-11-03 10:03:37 +01001385 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1386 ipip_entry->ipipt,
1387 ipip_entry->ol_dev,
1388 extack);
1389 if (IS_ERR(new_lb_rif))
1390 return PTR_ERR(new_lb_rif);
1391 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001392
Petr Machata09dbf622017-11-28 13:17:14 +01001393 if (keep_encap)
1394 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1395 &new_lb_rif->common);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001396
Petr Machata65a61212017-11-03 10:03:37 +01001397 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001398
Petr Machata65a61212017-11-03 10:03:37 +01001399 return 0;
1400}
1401
Petr Machata09dbf622017-11-28 13:17:14 +01001402static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1403 struct mlxsw_sp_rif *rif);
1404
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001405/**
1406 * Update the offload related to an IPIP entry. This always updates decap, and
1407 * in addition to that it also:
1408 * @recreate_loopback: recreates the associated loopback RIF
1409 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1410 * relevant when recreate_loopback is true.
1411 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1412 * is only relevant when recreate_loopback is false.
1413 */
Petr Machata65a61212017-11-03 10:03:37 +01001414int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1415 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001416 bool recreate_loopback,
1417 bool keep_encap,
1418 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001419 struct netlink_ext_ack *extack)
1420{
1421 int err;
1422
1423 /* RIFs can't be edited, so to update loopback, we need to destroy and
1424 * recreate it. That creates a window of opportunity where RALUE and
1425 * RATR registers end up referencing a RIF that's already gone. RATRs
1426 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001427 * of RALUE, demote the decap route back.
1428 */
1429 if (ipip_entry->decap_fib_entry)
1430 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1431
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001432 if (recreate_loopback) {
1433 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1434 keep_encap, extack);
1435 if (err)
1436 return err;
1437 } else if (update_nexthops) {
1438 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1439 &ipip_entry->ol_lb->common);
1440 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001441
Petr Machata65a61212017-11-03 10:03:37 +01001442 if (ipip_entry->ol_dev->flags & IFF_UP)
1443 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001444
1445 return 0;
1446}
1447
Petr Machata65a61212017-11-03 10:03:37 +01001448static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1449 struct net_device *ol_dev,
1450 struct netlink_ext_ack *extack)
1451{
1452 struct mlxsw_sp_ipip_entry *ipip_entry =
1453 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001454 enum mlxsw_sp_l3proto ul_proto;
1455 union mlxsw_sp_l3addr saddr;
1456 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001457
1458 if (!ipip_entry)
1459 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001460
 1461	/* For flat configuration cases, moving the overlay to a different VRF
 1462	 * might cause a local address conflict, and the conflicting tunnels need
 1463	 * to be demoted.
1464 */
1465 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1466 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1467 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1468 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1469 saddr, ul_tb_id,
1470 ipip_entry)) {
1471 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1472 return 0;
1473 }
1474
Petr Machata65a61212017-11-03 10:03:37 +01001475 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001476 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001477}
1478
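/* Moving the underlay device to a different VRF changes the tunnel's
 * underlay routing table: recreate the loopback RIF and migrate the encap
 * next hops over to the new RIF.
 */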
Petr Machata61481f22017-11-03 10:03:41 +01001479static int
1480mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1481 struct mlxsw_sp_ipip_entry *ipip_entry,
1482 struct net_device *ul_dev,
1483 struct netlink_ext_ack *extack)
1484{
1485 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1486 true, true, false, extack);
1487}
1488
Petr Machata4cf04f32017-11-03 10:03:42 +01001489static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001490mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1491 struct mlxsw_sp_ipip_entry *ipip_entry,
1492 struct net_device *ul_dev)
1493{
1494 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1495 false, false, true, NULL);
1496}
1497
1498static int
1499mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1500 struct mlxsw_sp_ipip_entry *ipip_entry,
1501 struct net_device *ul_dev)
1502{
1503 /* A down underlay device causes encapsulated packets to not be
1504 * forwarded, but decap still works. So refresh next hops without
1505 * touching anything else.
1506 */
1507 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1508 false, false, true, NULL);
1509}
1510
1511static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001512mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1513 struct net_device *ol_dev,
1514 struct netlink_ext_ack *extack)
1515{
1516 const struct mlxsw_sp_ipip_ops *ipip_ops;
1517 struct mlxsw_sp_ipip_entry *ipip_entry;
1518 int err;
1519
1520 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1521 if (!ipip_entry)
1522 /* A change might make a tunnel eligible for offloading, but
 1523		 * that is currently not implemented. What falls to the slow path
1524 * stays there.
1525 */
1526 return 0;
1527
1528 /* A change might make a tunnel not eligible for offloading. */
1529 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1530 ipip_entry->ipipt)) {
1531 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1532 return 0;
1533 }
1534
1535 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1536 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1537 return err;
1538}
1539
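/* Stop offloading a tunnel: if the overlay device is up, first take down
 * the offloaded decap route, then destroy the IPIP entry. The tunnel's
 * traffic is from then on handled by the kernel's slow path.
 */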
Petr Machataaf641712017-11-03 10:03:40 +01001540void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1541 struct mlxsw_sp_ipip_entry *ipip_entry)
1542{
1543 struct net_device *ol_dev = ipip_entry->ol_dev;
1544
1545 if (ol_dev->flags & IFF_UP)
1546 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1547 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1548}
1549
1550/* The configuration where several tunnels have the same local address in the
1551 * same underlay table needs special treatment in the HW. That is currently not
1552 * implemented in the driver. This function finds and demotes the first tunnel
 1553 * with a given source address, except the one passed in via the
 1554 * `except' argument.
1555 */
1556bool
1557mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1558 enum mlxsw_sp_l3proto ul_proto,
1559 union mlxsw_sp_l3addr saddr,
1560 u32 ul_tb_id,
1561 const struct mlxsw_sp_ipip_entry *except)
1562{
1563 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1564
1565 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1566 ipip_list_node) {
1567 if (ipip_entry != except &&
1568 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1569 ul_tb_id, ipip_entry)) {
1570 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1571 return true;
1572 }
1573 }
1574
1575 return false;
1576}
1577
Petr Machata61481f22017-11-03 10:03:41 +01001578static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1579 struct net_device *ul_dev)
1580{
1581 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1582
1583 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1584 ipip_list_node) {
1585 struct net_device *ipip_ul_dev =
1586 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1587
1588 if (ipip_ul_dev == ul_dev)
1589 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1590 }
1591}
1592
Petr Machata7e75af62017-11-03 10:03:36 +01001593int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1594 struct net_device *ol_dev,
1595 unsigned long event,
1596 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001597{
Petr Machata7e75af62017-11-03 10:03:36 +01001598 struct netdev_notifier_changeupper_info *chup;
1599 struct netlink_ext_ack *extack;
1600
Petr Machata00635872017-10-16 16:26:37 +02001601 switch (event) {
1602 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001603 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001604 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001605 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001606 return 0;
1607 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001608 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1609 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001610 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001611 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001612 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001613 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001614 chup = container_of(info, typeof(*chup), info);
1615 extack = info->extack;
1616 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001617 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001618 ol_dev,
1619 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001620 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001621 case NETDEV_CHANGE:
1622 extack = info->extack;
1623 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1624 ol_dev, extack);
Petr Machata00635872017-10-16 16:26:37 +02001625 }
1626 return 0;
1627}
1628
Petr Machata61481f22017-11-03 10:03:41 +01001629static int
1630__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1631 struct mlxsw_sp_ipip_entry *ipip_entry,
1632 struct net_device *ul_dev,
1633 unsigned long event,
1634 struct netdev_notifier_info *info)
1635{
1636 struct netdev_notifier_changeupper_info *chup;
1637 struct netlink_ext_ack *extack;
1638
1639 switch (event) {
1640 case NETDEV_CHANGEUPPER:
1641 chup = container_of(info, typeof(*chup), info);
1642 extack = info->extack;
1643 if (netif_is_l3_master(chup->upper_dev))
1644 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1645 ipip_entry,
1646 ul_dev,
1647 extack);
1648 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001649
1650 case NETDEV_UP:
1651 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1652 ul_dev);
1653 case NETDEV_DOWN:
1654 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1655 ipip_entry,
1656 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001657 }
1658 return 0;
1659}
1660
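/* Dispatch an event observed on an underlay netdevice to all IPIP entries
 * whose tunnel is routed through that device. If handling the event fails
 * for an entry, all tunnels using this underlay device are demoted.
 */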
1661int
1662mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1663 struct net_device *ul_dev,
1664 unsigned long event,
1665 struct netdev_notifier_info *info)
1666{
1667 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1668 int err;
1669
1670 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1671 ul_dev,
1672 ipip_entry))) {
1673 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1674 ul_dev, event, info);
1675 if (err) {
1676 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1677 ul_dev);
1678 return err;
1679 }
1680 }
1681
1682 return 0;
1683}
1684
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001685struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001686 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001687};
1688
1689struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001690 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001691 struct rhash_head ht_node;
1692 struct mlxsw_sp_neigh_key key;
1693 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001694 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001695 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001696 struct list_head nexthop_list; /* list of nexthops using
1697 * this neigh entry
1698 */
Yotam Gigib2157142016-07-05 11:27:51 +02001699 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001700 unsigned int counter_index;
1701 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001702};
1703
1704static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1705 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1706 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1707 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1708};
1709
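/* Walk the neighbour entries hanging off a RIF. Pass NULL to get the first
 * entry; NULL is returned after the last one.
 */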
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001710struct mlxsw_sp_neigh_entry *
1711mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1712 struct mlxsw_sp_neigh_entry *neigh_entry)
1713{
1714 if (!neigh_entry) {
1715 if (list_empty(&rif->neigh_list))
1716 return NULL;
1717 else
1718 return list_first_entry(&rif->neigh_list,
1719 typeof(*neigh_entry),
1720 rif_list_node);
1721 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001722 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001723 return NULL;
1724 return list_next_entry(neigh_entry, rif_list_node);
1725}
1726
1727int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1728{
1729 return neigh_entry->key.n->tbl->family;
1730}
1731
1732unsigned char *
1733mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1734{
1735 return neigh_entry->ha;
1736}
1737
1738u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1739{
1740 struct neighbour *n;
1741
1742 n = neigh_entry->key.n;
1743 return ntohl(*((__be32 *) n->primary_key));
1744}
1745
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001746struct in6_addr *
1747mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1748{
1749 struct neighbour *n;
1750
1751 n = neigh_entry->key.n;
1752 return (struct in6_addr *) &n->primary_key;
1753}
1754
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001755int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1756 struct mlxsw_sp_neigh_entry *neigh_entry,
1757 u64 *p_counter)
1758{
1759 if (!neigh_entry->counter_valid)
1760 return -EINVAL;
1761
1762 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1763 p_counter, NULL);
1764}
1765
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001766static struct mlxsw_sp_neigh_entry *
1767mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1768 u16 rif)
1769{
1770 struct mlxsw_sp_neigh_entry *neigh_entry;
1771
1772 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1773 if (!neigh_entry)
1774 return NULL;
1775
1776 neigh_entry->key.n = n;
1777 neigh_entry->rif = rif;
1778 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1779
1780 return neigh_entry;
1781}
1782
1783static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1784{
1785 kfree(neigh_entry);
1786}
1787
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001788static int
1789mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1790 struct mlxsw_sp_neigh_entry *neigh_entry)
1791{
Ido Schimmel9011b672017-05-16 19:38:25 +02001792 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001793 &neigh_entry->ht_node,
1794 mlxsw_sp_neigh_ht_params);
1795}
1796
1797static void
1798mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1799 struct mlxsw_sp_neigh_entry *neigh_entry)
1800{
Ido Schimmel9011b672017-05-16 19:38:25 +02001801 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001802 &neigh_entry->ht_node,
1803 mlxsw_sp_neigh_ht_params);
1804}
1805
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001806static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001807mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1808 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001809{
1810 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001811 const char *table_name;
1812
1813 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1814 case AF_INET:
1815 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1816 break;
1817 case AF_INET6:
1818 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1819 break;
1820 default:
1821 WARN_ON(1);
1822 return false;
1823 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001824
1825 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001826 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001827}
1828
1829static void
1830mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1831 struct mlxsw_sp_neigh_entry *neigh_entry)
1832{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001833 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001834 return;
1835
1836 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1837 return;
1838
1839 neigh_entry->counter_valid = true;
1840}
1841
1842static void
1843mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1844 struct mlxsw_sp_neigh_entry *neigh_entry)
1845{
1846 if (!neigh_entry->counter_valid)
1847 return;
1848 mlxsw_sp_flow_counter_free(mlxsw_sp,
1849 neigh_entry->counter_index);
1850 neigh_entry->counter_valid = false;
1851}
1852
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001853static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001854mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001855{
1856 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001857 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001858 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001859
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001860 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1861 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001862 return ERR_PTR(-EINVAL);
1863
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001864 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001865 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001866 return ERR_PTR(-ENOMEM);
1867
1868 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1869 if (err)
1870 goto err_neigh_entry_insert;
1871
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001872 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001873 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001874
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001875 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001876
1877err_neigh_entry_insert:
1878 mlxsw_sp_neigh_entry_free(neigh_entry);
1879 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001880}
1881
1882static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001883mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1884 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001885{
Ido Schimmel9665b742017-02-08 11:16:42 +01001886 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001887 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001888 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1889 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001890}
1891
1892static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001893mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001894{
Jiri Pirko33b13412016-11-10 12:31:04 +01001895 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001896
Jiri Pirko33b13412016-11-10 12:31:04 +01001897 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001898 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001899 &key, mlxsw_sp_neigh_ht_params);
1900}
1901
Yotam Gigic723c7352016-07-05 11:27:43 +02001902static void
1903mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1904{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001905 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001906
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001907#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001908 interval = min_t(unsigned long,
1909 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1910 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001911#else
1912 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1913#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001914 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001915}
1916
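/* Process one IPv4 entry of a RAUHTD activity dump: look up the matching
 * kernel neighbour on the RIF's netdevice and send an event on it to
 * reflect the activity seen by the hardware.
 */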
1917static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1918 char *rauhtd_pl,
1919 int ent_index)
1920{
1921 struct net_device *dev;
1922 struct neighbour *n;
1923 __be32 dipn;
1924 u32 dip;
1925 u16 rif;
1926
1927 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1928
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001929 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001930 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1931 return;
1932 }
1933
1934 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001935 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001936 n = neigh_lookup(&arp_tbl, &dipn, dev);
1937 if (!n) {
1938 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1939 &dip);
1940 return;
1941 }
1942
1943 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1944 neigh_event_send(n, NULL);
1945 neigh_release(n);
1946}
1947
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001948#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001949static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1950 char *rauhtd_pl,
1951 int rec_index)
1952{
1953 struct net_device *dev;
1954 struct neighbour *n;
1955 struct in6_addr dip;
1956 u16 rif;
1957
1958 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1959 (char *) &dip);
1960
1961 if (!mlxsw_sp->router->rifs[rif]) {
1962 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1963 return;
1964 }
1965
1966 dev = mlxsw_sp->router->rifs[rif]->dev;
1967 n = neigh_lookup(&nd_tbl, &dip, dev);
1968 if (!n) {
1969 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1970 &dip);
1971 return;
1972 }
1973
1974 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1975 neigh_event_send(n, NULL);
1976 neigh_release(n);
1977}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001978#else
1979static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1980 char *rauhtd_pl,
1981 int rec_index)
1982{
1983}
1984#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001985
Yotam Gigic723c7352016-07-05 11:27:43 +02001986static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1987 char *rauhtd_pl,
1988 int rec_index)
1989{
1990 u8 num_entries;
1991 int i;
1992
1993 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1994 rec_index);
1995 /* Hardware starts counting at 0, so add 1. */
1996 num_entries++;
1997
1998 /* Each record consists of several neighbour entries. */
1999 for (i = 0; i < num_entries; i++) {
2000 int ent_index;
2001
2002 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2003 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2004 ent_index);
2005 }
2007}
2008
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002009static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2010 char *rauhtd_pl,
2011 int rec_index)
2012{
2013 /* One record contains one entry. */
2014 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2015 rec_index);
2016}
2017
Yotam Gigic723c7352016-07-05 11:27:43 +02002018static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2019 char *rauhtd_pl, int rec_index)
2020{
2021 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2022 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2023 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2024 rec_index);
2025 break;
2026 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002027 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2028 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002029 break;
2030 }
2031}
2032
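/* The dump is full when the maximum number of records was returned and the
 * last record is packed to capacity: an IPv6 record always carries a single
 * entry, an IPv4 record up to MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC entries.
 * A full dump means another RAUHTD query is needed.
 */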
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002033static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2034{
2035 u8 num_rec, last_rec_index, num_entries;
2036
2037 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2038 last_rec_index = num_rec - 1;
2039
2040 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2041 return false;
2042 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2043 MLXSW_REG_RAUHTD_TYPE_IPV6)
2044 return true;
2045
2046 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2047 last_rec_index);
2048 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2049 return true;
2050 return false;
2051}
2052
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002053static int
2054__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2055 char *rauhtd_pl,
2056 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002057{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002058 int i, num_rec;
2059 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002060
2061 /* Make sure the neighbour's netdev isn't removed in the
2062 * process.
2063 */
2064 rtnl_lock();
2065 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002066 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002067 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2068 rauhtd_pl);
2069 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002070 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002071 break;
2072 }
2073 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2074 for (i = 0; i < num_rec; i++)
2075 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2076 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002077 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002078 rtnl_unlock();
2079
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002080 return err;
2081}
2082
2083static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2084{
2085 enum mlxsw_reg_rauhtd_type type;
2086 char *rauhtd_pl;
2087 int err;
2088
2089 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2090 if (!rauhtd_pl)
2091 return -ENOMEM;
2092
2093 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2094 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2095 if (err)
2096 goto out;
2097
2098 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2099 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2100out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002101 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002102 return err;
2103}
2104
2105static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2106{
2107 struct mlxsw_sp_neigh_entry *neigh_entry;
2108
 2109	/* Take RTNL mutex here to prevent the lists from changing */
2110 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002111 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002112 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002113		/* If this neigh has nexthops, make the kernel think it is
 2114		 * active regardless of the traffic.
2115 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002116 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002117 rtnl_unlock();
2118}
2119
2120static void
2121mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2122{
Ido Schimmel9011b672017-05-16 19:38:25 +02002123 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002124
Ido Schimmel9011b672017-05-16 19:38:25 +02002125 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002126 msecs_to_jiffies(interval));
2127}
2128
2129static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2130{
Ido Schimmel9011b672017-05-16 19:38:25 +02002131 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002132 int err;
2133
Ido Schimmel9011b672017-05-16 19:38:25 +02002134 router = container_of(work, struct mlxsw_sp_router,
2135 neighs_update.dw.work);
2136 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002137 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002138		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
Yotam Gigib2157142016-07-05 11:27:51 +02002139
Ido Schimmel9011b672017-05-16 19:38:25 +02002140 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002141
Ido Schimmel9011b672017-05-16 19:38:25 +02002142 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002143}
2144
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002145static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2146{
2147 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002148 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002149
Ido Schimmel9011b672017-05-16 19:38:25 +02002150 router = container_of(work, struct mlxsw_sp_router,
2151 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002152	/* Iterate over nexthop neighbours, find those that are unresolved and
 2153	 * send ARP on them. This solves the chicken-and-egg problem where a
 2154	 * nexthop would not get offloaded until its neighbour is resolved, but
 2155	 * the neighbour would never get resolved if traffic is flowing in HW
 2156	 * using a different nexthop.
 2157	 *
 2158	 * Take RTNL mutex here to prevent the lists from changing.
2159 */
2160 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002161 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002162 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002163 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002164 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002165 rtnl_unlock();
2166
Ido Schimmel9011b672017-05-16 19:38:25 +02002167 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002168 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2169}
2170
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002171static void
2172mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2173 struct mlxsw_sp_neigh_entry *neigh_entry,
2174 bool removing);
2175
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002176static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002177{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002178 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2179 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2180}
2181
2182static void
2183mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2184 struct mlxsw_sp_neigh_entry *neigh_entry,
2185 enum mlxsw_reg_rauht_op op)
2186{
Jiri Pirko33b13412016-11-10 12:31:04 +01002187 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002188 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002189 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002190
2191 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2192 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002193 if (neigh_entry->counter_valid)
2194 mlxsw_reg_rauht_pack_counter(rauht_pl,
2195 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002196 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2197}
2198
2199static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002200mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2201 struct mlxsw_sp_neigh_entry *neigh_entry,
2202 enum mlxsw_reg_rauht_op op)
2203{
2204 struct neighbour *n = neigh_entry->key.n;
2205 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2206 const char *dip = n->primary_key;
2207
2208 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2209 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002210 if (neigh_entry->counter_valid)
2211 mlxsw_reg_rauht_pack_counter(rauht_pl,
2212 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002213 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2214}
2215
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002216bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002217{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002218 struct neighbour *n = neigh_entry->key.n;
2219
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002220 /* Packets with a link-local destination address are trapped
2221 * after LPM lookup and never reach the neighbour table, so
2222 * there is no need to program such neighbours to the device.
2223 */
2224 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2225 IPV6_ADDR_LINKLOCAL)
2226 return true;
2227 return false;
2228}
2229
2230static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002231mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2232 struct mlxsw_sp_neigh_entry *neigh_entry,
2233 bool adding)
2234{
2235 if (!adding && !neigh_entry->connected)
2236 return;
2237 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002238 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002239 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2240 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002241 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002242 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002243 return;
2244 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2245 mlxsw_sp_rauht_op(adding));
2246 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002247 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002248 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002249}
2250
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002251void
2252mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2253 struct mlxsw_sp_neigh_entry *neigh_entry,
2254 bool adding)
2255{
2256 if (adding)
2257 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2258 else
2259 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2260 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2261}
2262
Ido Schimmelceb88812017-11-02 17:14:07 +01002263struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002264 struct work_struct work;
2265 struct mlxsw_sp *mlxsw_sp;
2266 struct neighbour *n;
2267};
2268
2269static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2270{
Ido Schimmelceb88812017-11-02 17:14:07 +01002271 struct mlxsw_sp_netevent_work *net_work =
2272 container_of(work, struct mlxsw_sp_netevent_work, work);
2273 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002274 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002275 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002276 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002277 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002278 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002279
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002280 /* If these parameters are changed after we release the lock,
2281 * then we are guaranteed to receive another event letting us
2282 * know about it.
2283 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002284 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002285 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002286 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002287 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002288 read_unlock_bh(&n->lock);
2289
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002290 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002291 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002292 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2293 if (!entry_connected && !neigh_entry)
2294 goto out;
2295 if (!neigh_entry) {
2296 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2297 if (IS_ERR(neigh_entry))
2298 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002299 }
2300
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002301 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2302 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2303 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2304
2305 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2306 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2307
2308out:
2309 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002310 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002311 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002312}
2313
Ido Schimmel28678f02017-11-02 17:14:10 +01002314static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2315
2316static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2317{
2318 struct mlxsw_sp_netevent_work *net_work =
2319 container_of(work, struct mlxsw_sp_netevent_work, work);
2320 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2321
2322 mlxsw_sp_mp_hash_init(mlxsw_sp);
2323 kfree(net_work);
2324}
2325
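/* Netevent notifier. This is called in atomic context; neighbour updates
 * and multipath hash changes are therefore handed off to work items, only
 * the probe interval update is handled in place.
 */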
2326static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002327 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002328{
Ido Schimmelceb88812017-11-02 17:14:07 +01002329 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002330 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002331 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002332 struct mlxsw_sp *mlxsw_sp;
2333 unsigned long interval;
2334 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002335 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002336 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002337
2338 switch (event) {
2339 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2340 p = ptr;
2341
2342 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002343 if (!p->dev || (p->tbl->family != AF_INET &&
2344 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002345 return NOTIFY_DONE;
2346
2347 /* We are in atomic context and can't take RTNL mutex,
2348 * so use RCU variant to walk the device chain.
2349 */
2350 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2351 if (!mlxsw_sp_port)
2352 return NOTIFY_DONE;
2353
2354 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2355 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002356 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002357
2358 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2359 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002360 case NETEVENT_NEIGH_UPDATE:
2361 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002362
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002363 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002364 return NOTIFY_DONE;
2365
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002366 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002367 if (!mlxsw_sp_port)
2368 return NOTIFY_DONE;
2369
Ido Schimmelceb88812017-11-02 17:14:07 +01002370 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2371 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002372 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002373 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002374 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002375
Ido Schimmelceb88812017-11-02 17:14:07 +01002376 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2377 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2378 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002379
 2380		/* Take a reference to ensure the neighbour won't be
 2381		 * destroyed until we drop the reference in the work
 2382		 * item.
2383 */
2384 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002385 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002386 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002387 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002388 case NETEVENT_MULTIPATH_HASH_UPDATE:
2389 net = ptr;
2390
2391 if (!net_eq(net, &init_net))
2392 return NOTIFY_DONE;
2393
2394 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2395 if (!net_work)
2396 return NOTIFY_BAD;
2397
2398 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2399 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2400 net_work->mlxsw_sp = router->mlxsw_sp;
2401 mlxsw_core_schedule_work(&net_work->work);
2402 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002403 }
2404
2405 return NOTIFY_DONE;
2406}
2407
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002408static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2409{
Yotam Gigic723c7352016-07-05 11:27:43 +02002410 int err;
2411
Ido Schimmel9011b672017-05-16 19:38:25 +02002412 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002413 &mlxsw_sp_neigh_ht_params);
2414 if (err)
2415 return err;
2416
2417 /* Initialize the polling interval according to the default
2418 * table.
2419 */
2420 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2421
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002422	/* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002423 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002424 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002425 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002426 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002427 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2428 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002429 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002430}
2431
2432static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2433{
Ido Schimmel9011b672017-05-16 19:38:25 +02002434 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2435 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2436 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002437}
2438
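/* A RIF is going away: take its neighbour entries out of the device and
 * destroy them.
 */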
Ido Schimmel9665b742017-02-08 11:16:42 +01002439static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002440 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002441{
2442 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2443
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002444 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Petr Machata8ba6b302017-12-17 17:16:43 +01002445 rif_list_node) {
2446 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002447 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Petr Machata8ba6b302017-12-17 17:16:43 +01002448 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002449}
2450
Petr Machata35225e42017-09-02 23:49:22 +02002451enum mlxsw_sp_nexthop_type {
2452 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002453 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002454};
2455
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002456struct mlxsw_sp_nexthop_key {
2457 struct fib_nh *fib_nh;
2458};
2459
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002460struct mlxsw_sp_nexthop {
2461 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002462 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002463 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002464 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2465 * this belongs to
2466 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002467 struct rhash_head ht_node;
2468 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002469 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002470 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002471 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002472 int norm_nh_weight;
2473 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002474 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002475 u8 should_offload:1, /* set indicates this neigh is connected and
2476 * should be put to KVD linear area of this group.
2477 */
2478 offloaded:1, /* set in case the neigh is actually put into
2479 * KVD linear area of this group.
2480 */
2481 update:1; /* set indicates that MAC of this neigh should be
2482 * updated in HW
2483 */
Petr Machata35225e42017-09-02 23:49:22 +02002484 enum mlxsw_sp_nexthop_type type;
2485 union {
2486 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002487 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002488 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002489 unsigned int counter_index;
2490 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002491};
2492
2493struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002494 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002495 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002496 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002497 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002498 u8 adj_index_valid:1,
2499 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002500 u32 adj_index;
2501 u16 ecmp_size;
2502 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002503 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002504 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002505#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002506};
2507
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002508void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2509 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002510{
2511 struct devlink *devlink;
2512
2513 devlink = priv_to_devlink(mlxsw_sp->core);
2514 if (!devlink_dpipe_table_counter_enabled(devlink,
2515 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2516 return;
2517
2518 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2519 return;
2520
2521 nh->counter_valid = true;
2522}
2523
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002524void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2525 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002526{
2527 if (!nh->counter_valid)
2528 return;
2529 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2530 nh->counter_valid = false;
2531}
2532
2533int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2534 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2535{
2536 if (!nh->counter_valid)
2537 return -EINVAL;
2538
2539 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2540 p_counter, NULL);
2541}
2542
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002543struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2544 struct mlxsw_sp_nexthop *nh)
2545{
2546 if (!nh) {
2547 if (list_empty(&router->nexthop_list))
2548 return NULL;
2549 else
2550 return list_first_entry(&router->nexthop_list,
2551 typeof(*nh), router_list_node);
2552 }
2553 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2554 return NULL;
2555 return list_next_entry(nh, router_list_node);
2556}
2557
2558bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2559{
2560 return nh->offloaded;
2561}
2562
2563unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2564{
2565 if (!nh->offloaded)
2566 return NULL;
2567 return nh->neigh_entry->ha;
2568}
2569
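/* Report where a nexthop sits in the adjacency table: the group's base
 * adjacency index, the group size and the offset of this nexthop within
 * the group. A weighted nexthop may span several adjacency entries.
 */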
2570int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002571 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002572{
2573 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2574 u32 adj_hash_index = 0;
2575 int i;
2576
2577 if (!nh->offloaded || !nh_grp->adj_index_valid)
2578 return -EINVAL;
2579
2580 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002581 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002582
2583 for (i = 0; i < nh_grp->count; i++) {
2584 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2585
2586 if (nh_iter == nh)
2587 break;
2588 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002589 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002590 }
2591
2592 *p_adj_hash_index = adj_hash_index;
2593 return 0;
2594}
2595
2596struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2597{
2598 return nh->rif;
2599}
2600
2601bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2602{
2603 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2604 int i;
2605
2606 for (i = 0; i < nh_grp->count; i++) {
2607 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2608
2609 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2610 return true;
2611 }
2612 return false;
2613}
2614
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002615static struct fib_info *
2616mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2617{
2618 return nh_grp->priv;
2619}
2620
2621struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002622 enum mlxsw_sp_l3proto proto;
2623 union {
2624 struct fib_info *fi;
2625 struct mlxsw_sp_fib6_entry *fib6_entry;
2626 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002627};
2628
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002629static bool
2630mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
Ido Schimmel3743d882018-01-12 17:15:59 +01002631 const struct in6_addr *gw, int ifindex,
2632 int weight)
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002633{
2634 int i;
2635
2636 for (i = 0; i < nh_grp->count; i++) {
2637 const struct mlxsw_sp_nexthop *nh;
2638
2639 nh = &nh_grp->nexthops[i];
Ido Schimmel3743d882018-01-12 17:15:59 +01002640 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002641 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2642 return true;
2643 }
2644
2645 return false;
2646}
2647
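/* An IPv6 nexthop group matches a FIB entry if both have the same number of
 * nexthops and every route of the entry has a corresponding nexthop with the
 * same interface, weight and gateway.
 */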
2648static bool
2649mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2650 const struct mlxsw_sp_fib6_entry *fib6_entry)
2651{
2652 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2653
2654 if (nh_grp->count != fib6_entry->nrt6)
2655 return false;
2656
2657 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2658 struct in6_addr *gw;
Ido Schimmel3743d882018-01-12 17:15:59 +01002659 int ifindex, weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002660
2661 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
Ido Schimmel3743d882018-01-12 17:15:59 +01002662 weight = mlxsw_sp_rt6->rt->rt6i_nh_weight;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002663 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
Ido Schimmel3743d882018-01-12 17:15:59 +01002664 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
2665 weight))
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002666 return false;
2667 }
2668
2669 return true;
2670}
2671
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002672static int
2673mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2674{
2675 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2676 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2677
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002678 switch (cmp_arg->proto) {
2679 case MLXSW_SP_L3_PROTO_IPV4:
2680 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2681 case MLXSW_SP_L3_PROTO_IPV6:
2682 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2683 cmp_arg->fib6_entry);
2684 default:
2685 WARN_ON(1);
2686 return 1;
2687 }
2688}
2689
2690static int
2691mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2692{
2693 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002694}
2695
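/* Hash an existing nexthop group: IPv4 groups hash on their fib_info
 * pointer, IPv6 groups on the nexthop count XORed with the ifindex of each
 * nexthop.
 */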
2696static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2697{
2698 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002699 const struct mlxsw_sp_nexthop *nh;
2700 struct fib_info *fi;
2701 unsigned int val;
2702 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002703
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002704 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2705 case AF_INET:
2706 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2707 return jhash(&fi, sizeof(fi), seed);
2708 case AF_INET6:
2709 val = nh_grp->count;
2710 for (i = 0; i < nh_grp->count; i++) {
2711 nh = &nh_grp->nexthops[i];
2712 val ^= nh->ifindex;
2713 }
2714 return jhash(&val, sizeof(val), seed);
2715 default:
2716 WARN_ON(1);
2717 return 0;
2718 }
2719}
2720
2721static u32
2722mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2723{
2724 unsigned int val = fib6_entry->nrt6;
2725 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2726 struct net_device *dev;
2727
2728 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2729 dev = mlxsw_sp_rt6->rt->dst.dev;
2730 val ^= dev->ifindex;
2731 }
2732
2733 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002734}
2735
2736static u32
2737mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2738{
2739 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2740
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002741 switch (cmp_arg->proto) {
2742 case MLXSW_SP_L3_PROTO_IPV4:
2743 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2744 case MLXSW_SP_L3_PROTO_IPV6:
2745 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2746 default:
2747 WARN_ON(1);
2748 return 0;
2749 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002750}
2751
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002752static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002753 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002754 .hashfn = mlxsw_sp_nexthop_group_hash,
2755 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2756 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002757};
2758
2759static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2760 struct mlxsw_sp_nexthop_group *nh_grp)
2761{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002762 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2763 !nh_grp->gateway)
2764 return 0;
2765
Ido Schimmel9011b672017-05-16 19:38:25 +02002766 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002767 &nh_grp->ht_node,
2768 mlxsw_sp_nexthop_group_ht_params);
2769}
2770
2771static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2772 struct mlxsw_sp_nexthop_group *nh_grp)
2773{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002774 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2775 !nh_grp->gateway)
2776 return;
2777
Ido Schimmel9011b672017-05-16 19:38:25 +02002778 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002779 &nh_grp->ht_node,
2780 mlxsw_sp_nexthop_group_ht_params);
2781}
2782
2783static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002784mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2785 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002786{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002787 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2788
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002789 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002790 cmp_arg.fi = fi;
2791 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2792 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002793 mlxsw_sp_nexthop_group_ht_params);
2794}
2795
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002796static struct mlxsw_sp_nexthop_group *
2797mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2798 struct mlxsw_sp_fib6_entry *fib6_entry)
2799{
2800 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2801
2802 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2803 cmp_arg.fib6_entry = fib6_entry;
2804 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2805 &cmp_arg,
2806 mlxsw_sp_nexthop_group_ht_params);
2807}
2808
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002809static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2810 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2811 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2812 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2813};
2814
2815static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2816 struct mlxsw_sp_nexthop *nh)
2817{
Ido Schimmel9011b672017-05-16 19:38:25 +02002818 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002819 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2820}
2821
2822static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2823 struct mlxsw_sp_nexthop *nh)
2824{
Ido Schimmel9011b672017-05-16 19:38:25 +02002825 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002826 mlxsw_sp_nexthop_ht_params);
2827}
2828
Ido Schimmelad178c82017-02-08 11:16:40 +01002829static struct mlxsw_sp_nexthop *
2830mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2831 struct mlxsw_sp_nexthop_key key)
2832{
Ido Schimmel9011b672017-05-16 19:38:25 +02002833 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002834 mlxsw_sp_nexthop_ht_params);
2835}
2836
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002837static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002838 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002839 u32 adj_index, u16 ecmp_size,
2840 u32 new_adj_index,
2841 u16 new_ecmp_size)
2842{
2843 char raleu_pl[MLXSW_REG_RALEU_LEN];
2844
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002845 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002846 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2847 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002848 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002849 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2850}
2851
2852static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2853 struct mlxsw_sp_nexthop_group *nh_grp,
2854 u32 old_adj_index, u16 old_ecmp_size)
2855{
2856 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002857 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002858 int err;
2859
2860 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002861 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002862 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002863 fib = fib_entry->fib_node->fib;
2864 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002865 old_adj_index,
2866 old_ecmp_size,
2867 nh_grp->adj_index,
2868 nh_grp->ecmp_size);
2869 if (err)
2870 return err;
2871 }
2872 return 0;
2873}
2874
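/* The RALEU register lets the device rewrite, within one virtual router,
 * FIB entries that still point at the old adjacency index and ECMP size
 * so that they point at the new ones.  The loop above issues one such
 * rewrite whenever the FIB changes while walking the group's fib_list,
 * avoiding redundant writes for consecutive entries that share a FIB.
 */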
Ido Schimmeleb789982017-10-22 23:11:48 +02002875static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2876 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002877{
2878 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2879 char ratr_pl[MLXSW_REG_RATR_LEN];
2880
2881 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002882 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2883 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002884 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002885 if (nh->counter_valid)
2886 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2887 else
2888 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2889
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002890 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2891}
2892
Ido Schimmeleb789982017-10-22 23:11:48 +02002893int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2894 struct mlxsw_sp_nexthop *nh)
2895{
2896 int i;
2897
2898 for (i = 0; i < nh->num_adj_entries; i++) {
2899 int err;
2900
2901 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2902 if (err)
2903 return err;
2904 }
2905
2906 return 0;
2907}
2908
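/* With weighted ECMP a single nexthop may own several consecutive
 * adjacency entries (nh->num_adj_entries), so the loop above writes the
 * same neighbour information to every entry in the range
 * [adj_index, adj_index + num_adj_entries).  A nexthop assigned three
 * entries at base index 8, for example, is written at indexes 8-10.
 */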
2909static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2910 u32 adj_index,
2911 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002912{
2913 const struct mlxsw_sp_ipip_ops *ipip_ops;
2914
2915 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2916 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2917}
2918
Ido Schimmeleb789982017-10-22 23:11:48 +02002919static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2920 u32 adj_index,
2921 struct mlxsw_sp_nexthop *nh)
2922{
2923 int i;
2924
2925 for (i = 0; i < nh->num_adj_entries; i++) {
2926 int err;
2927
2928 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2929 nh);
2930 if (err)
2931 return err;
2932 }
2933
2934 return 0;
2935}
2936
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002937static int
Petr Machata35225e42017-09-02 23:49:22 +02002938mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2939 struct mlxsw_sp_nexthop_group *nh_grp,
2940 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002941{
2942 u32 adj_index = nh_grp->adj_index; /* base */
2943 struct mlxsw_sp_nexthop *nh;
2944 int i;
2945 int err;
2946
2947 for (i = 0; i < nh_grp->count; i++) {
2948 nh = &nh_grp->nexthops[i];
2949
2950 if (!nh->should_offload) {
2951 nh->offloaded = 0;
2952 continue;
2953 }
2954
Ido Schimmela59b7e02017-01-23 11:11:42 +01002955 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002956 switch (nh->type) {
2957 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002958 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002959 (mlxsw_sp, adj_index, nh);
2960 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002961 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2962 err = mlxsw_sp_nexthop_ipip_update
2963 (mlxsw_sp, adj_index, nh);
2964 break;
Petr Machata35225e42017-09-02 23:49:22 +02002965 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002966 if (err)
2967 return err;
2968 nh->update = 0;
2969 nh->offloaded = 1;
2970 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002971 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002972 }
2973 return 0;
2974}
2975
Ido Schimmel1819ae32017-07-21 18:04:28 +02002976static bool
2977mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2978 const struct mlxsw_sp_fib_entry *fib_entry);
2979
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002980static int
2981mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2982 struct mlxsw_sp_nexthop_group *nh_grp)
2983{
2984 struct mlxsw_sp_fib_entry *fib_entry;
2985 int err;
2986
2987 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002988 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2989 fib_entry))
2990 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002991 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2992 if (err)
2993 return err;
2994 }
2995 return 0;
2996}
2997
2998static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002999mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3000 enum mlxsw_reg_ralue_op op, int err);
3001
3002static void
3003mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3004{
3005 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3006 struct mlxsw_sp_fib_entry *fib_entry;
3007
3008 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3009 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3010 fib_entry))
3011 continue;
3012 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3013 }
3014}
3015
Ido Schimmel425a08c2017-10-22 23:11:47 +02003016static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3017{
3018 /* Valid sizes for an adjacency group are:
3019 * 1-64, 512, 1024, 2048 and 4096.
3020 */
3021 if (*p_adj_grp_size <= 64)
3022 return;
3023 else if (*p_adj_grp_size <= 512)
3024 *p_adj_grp_size = 512;
3025 else if (*p_adj_grp_size <= 1024)
3026 *p_adj_grp_size = 1024;
3027 else if (*p_adj_grp_size <= 2048)
3028 *p_adj_grp_size = 2048;
3029 else
3030 *p_adj_grp_size = 4096;
3031}
3032
3033static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3034 unsigned int alloc_size)
3035{
3036 if (alloc_size >= 4096)
3037 *p_adj_grp_size = 4096;
3038 else if (alloc_size >= 2048)
3039 *p_adj_grp_size = 2048;
3040 else if (alloc_size >= 1024)
3041 *p_adj_grp_size = 1024;
3042 else if (alloc_size >= 512)
3043 *p_adj_grp_size = 512;
3044}
3045
3046static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3047 u16 *p_adj_grp_size)
3048{
3049 unsigned int alloc_size;
3050 int err;
3051
3052 /* Round up the requested group size to the next size supported
3053 * by the device and make sure the request can be satisfied.
3054 */
3055 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3056 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3057 &alloc_size);
3058 if (err)
3059 return err;
3060 /* It is possible the allocation results in more allocated
3061		 * entries than requested. Try to use as many of them as
3062 * possible.
3063 */
3064 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3065
3066 return 0;
3067}
3068
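/* Worked example (sizes are illustrative): a group needing 70 adjacency
 * entries is first rounded up to 512, the next size the device supports.
 * If the KVD linear allocator reports that such a request would be
 * satisfied from, say, a 512-entry chunk, the size stays 512; if it
 * reported a larger chunk, the size is rounded down to the largest
 * supported size that still fits in it, so the extra entries are used
 * rather than wasted.
 */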
Ido Schimmel77d964e2017-08-02 09:56:05 +02003069static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003070mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3071{
3072 int i, g = 0, sum_norm_weight = 0;
3073 struct mlxsw_sp_nexthop *nh;
3074
3075 for (i = 0; i < nh_grp->count; i++) {
3076 nh = &nh_grp->nexthops[i];
3077
3078 if (!nh->should_offload)
3079 continue;
3080 if (g > 0)
3081 g = gcd(nh->nh_weight, g);
3082 else
3083 g = nh->nh_weight;
3084 }
3085
3086 for (i = 0; i < nh_grp->count; i++) {
3087 nh = &nh_grp->nexthops[i];
3088
3089 if (!nh->should_offload)
3090 continue;
3091 nh->norm_nh_weight = nh->nh_weight / g;
3092 sum_norm_weight += nh->norm_nh_weight;
3093 }
3094
3095 nh_grp->sum_norm_weight = sum_norm_weight;
3096}
3097
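/* Worked example: offloadable nexthops with user weights 2, 4 and 6
 * have a running gcd of 2, so their normalized weights become 1, 2 and 3
 * and sum_norm_weight is 6.  Nexthops that should not be offloaded are
 * skipped by both passes.
 */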
3098static void
3099mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3100{
3101 int total = nh_grp->sum_norm_weight;
3102 u16 ecmp_size = nh_grp->ecmp_size;
3103 int i, weight = 0, lower_bound = 0;
3104
3105 for (i = 0; i < nh_grp->count; i++) {
3106 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3107 int upper_bound;
3108
3109 if (!nh->should_offload)
3110 continue;
3111 weight += nh->norm_nh_weight;
3112 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3113 nh->num_adj_entries = upper_bound - lower_bound;
3114 lower_bound = upper_bound;
3115 }
3116}
3117
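/* Worked example, continuing the normalized weights 1, 2 and 3 above
 * with ecmp_size == 6: the rounded upper bounds come out as 1, 3 and 6,
 * so the nexthops receive 1, 2 and 3 adjacency entries respectively.
 * When ecmp_size is not a multiple of the weight sum, the rounding
 * spreads the remainder as evenly as possible across the nexthops.
 */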
3118static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003119mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3120 struct mlxsw_sp_nexthop_group *nh_grp)
3121{
Ido Schimmeleb789982017-10-22 23:11:48 +02003122 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003123 struct mlxsw_sp_nexthop *nh;
3124 bool offload_change = false;
3125 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003126 bool old_adj_index_valid;
3127 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003128 int i;
3129 int err;
3130
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003131 if (!nh_grp->gateway) {
3132 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3133 return;
3134 }
3135
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003136 for (i = 0; i < nh_grp->count; i++) {
3137 nh = &nh_grp->nexthops[i];
3138
Petr Machata56b8a9e2017-07-31 09:27:29 +02003139 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003140 offload_change = true;
3141 if (nh->should_offload)
3142 nh->update = 1;
3143 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003144 }
3145 if (!offload_change) {
3146 /* Nothing was added or removed, so no need to reallocate. Just
3147 * update MAC on existing adjacency indexes.
3148 */
Petr Machata35225e42017-09-02 23:49:22 +02003149 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003150 if (err) {
3151 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3152 goto set_trap;
3153 }
3154 return;
3155 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003156 mlxsw_sp_nexthop_group_normalize(nh_grp);
3157 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003158 /* No neigh of this group is connected so we just set
3159		 * the trap and let everything flow through the kernel.
3160 */
3161 goto set_trap;
3162
Ido Schimmeleb789982017-10-22 23:11:48 +02003163 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003164 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3165 if (err)
3166 /* No valid allocation size available. */
3167 goto set_trap;
3168
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003169 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3170 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003171 /* We ran out of KVD linear space, just set the
3172		 * trap and let everything flow through the kernel.
3173 */
3174 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3175 goto set_trap;
3176 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003177 old_adj_index_valid = nh_grp->adj_index_valid;
3178 old_adj_index = nh_grp->adj_index;
3179 old_ecmp_size = nh_grp->ecmp_size;
3180 nh_grp->adj_index_valid = 1;
3181 nh_grp->adj_index = adj_index;
3182 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003183 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003184 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003185 if (err) {
3186 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3187 goto set_trap;
3188 }
3189
3190 if (!old_adj_index_valid) {
3191 /* The trap was set for fib entries, so we have to call
3192 * fib entry update to unset it and use adjacency index.
3193 */
3194 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3195 if (err) {
3196 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3197 goto set_trap;
3198 }
3199 return;
3200 }
3201
3202 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3203 old_adj_index, old_ecmp_size);
3204 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3205 if (err) {
3206 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3207 goto set_trap;
3208 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003209
3210 /* Offload state within the group changed, so update the flags. */
3211 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3212
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003213 return;
3214
3215set_trap:
3216 old_adj_index_valid = nh_grp->adj_index_valid;
3217 nh_grp->adj_index_valid = 0;
3218 for (i = 0; i < nh_grp->count; i++) {
3219 nh = &nh_grp->nexthops[i];
3220 nh->offloaded = 0;
3221 }
3222 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3223 if (err)
3224 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3225 if (old_adj_index_valid)
3226 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3227}
3228
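/* Summary of the refresh flow above: normalize the weights, derive and
 * fix up the ECMP group size, allocate a fresh KVD linear block, write
 * the adjacency entries and then either point the FIB entries at the new
 * block (first allocation) or mass-update them away from the old block
 * before freeing it.  Any failure falls back to trapping the group's
 * traffic to the kernel.
 */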
3229static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3230 bool removing)
3231{
Petr Machata213666a2017-07-31 09:27:30 +02003232 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003233 nh->should_offload = 1;
Ido Schimmel8764a822017-12-25 08:57:35 +01003234 else
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003235 nh->should_offload = 0;
3236 nh->update = 1;
3237}
3238
3239static void
3240mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3241 struct mlxsw_sp_neigh_entry *neigh_entry,
3242 bool removing)
3243{
3244 struct mlxsw_sp_nexthop *nh;
3245
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003246 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3247 neigh_list_node) {
3248 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3249 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3250 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003251}
3252
Ido Schimmel9665b742017-02-08 11:16:42 +01003253static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003254 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003255{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003256 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003257 return;
3258
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003259 nh->rif = rif;
3260 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003261}
3262
3263static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3264{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003265 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003266 return;
3267
3268 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003269 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003270}
3271
Ido Schimmela8c97012017-02-08 11:16:35 +01003272static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3273 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003274{
3275 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003276 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003277 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003278 int err;
3279
Ido Schimmelad178c82017-02-08 11:16:40 +01003280 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003281 return 0;
3282
Jiri Pirko33b13412016-11-10 12:31:04 +01003283	/* Take a reference on the neighbour to make sure it is not
Petr Machata8de3c172017-07-31 09:27:25 +02003284	 * destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003285 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003286 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003287 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003288 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003289 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003290 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3291 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003292 if (IS_ERR(n))
3293 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003294 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003295 }
3296 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3297 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003298 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3299 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003300 err = -EINVAL;
3301 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003302 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003303 }
Yotam Gigib2157142016-07-05 11:27:51 +02003304
3305 /* If that is the first nexthop connected to that neigh, add to
3306 * nexthop_neighs_list
3307 */
3308 if (list_empty(&neigh_entry->nexthop_list))
3309 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003310 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003311
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003312 nh->neigh_entry = neigh_entry;
3313 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3314 read_lock_bh(&n->lock);
3315 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003316 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003317 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003318 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003319
3320 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003321
3322err_neigh_entry_create:
3323 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003324 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003325}
3326
Ido Schimmela8c97012017-02-08 11:16:35 +01003327static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3328 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003329{
3330 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003331 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003332
Ido Schimmelb8399a12017-02-08 11:16:33 +01003333 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003334 return;
3335 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003336
Ido Schimmel58312122016-12-23 09:32:50 +01003337 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003338 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003339 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003340
3341 /* If that is the last nexthop connected to that neigh, remove from
3342 * nexthop_neighs_list
3343 */
Ido Schimmele58be792017-02-08 11:16:28 +01003344 if (list_empty(&neigh_entry->nexthop_list))
3345 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003346
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003347 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3348 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3349
3350 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003351}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003352
Petr Machata44b0fff2017-11-03 10:03:44 +01003353static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3354{
3355 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3356
3357 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3358}
3359
Petr Machatad97cda52017-11-28 13:17:13 +01003360static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3361 struct mlxsw_sp_nexthop *nh,
3362 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02003363{
Petr Machata44b0fff2017-11-03 10:03:44 +01003364 bool removing;
3365
Petr Machata1012b9a2017-09-02 23:49:23 +02003366 if (!nh->nh_grp->gateway || nh->ipip_entry)
Petr Machatad97cda52017-11-28 13:17:13 +01003367 return;
Petr Machata1012b9a2017-09-02 23:49:23 +02003368
Petr Machatad97cda52017-11-28 13:17:13 +01003369 nh->ipip_entry = ipip_entry;
3370 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
Petr Machata44b0fff2017-11-03 10:03:44 +01003371 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machatad97cda52017-11-28 13:17:13 +01003372 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
Petr Machata1012b9a2017-09-02 23:49:23 +02003373}
3374
3375static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3376 struct mlxsw_sp_nexthop *nh)
3377{
3378 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3379
3380 if (!ipip_entry)
3381 return;
3382
3383 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003384 nh->ipip_entry = NULL;
3385}
3386
3387static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3388 const struct fib_nh *fib_nh,
3389 enum mlxsw_sp_ipip_type *p_ipipt)
3390{
3391 struct net_device *dev = fib_nh->nh_dev;
3392
3393 return dev &&
3394 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3395 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3396}
3397
Petr Machata35225e42017-09-02 23:49:22 +02003398static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3399 struct mlxsw_sp_nexthop *nh)
3400{
3401 switch (nh->type) {
3402 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3403 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3404 mlxsw_sp_nexthop_rif_fini(nh);
3405 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003406 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003407 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003408 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3409 break;
Petr Machata35225e42017-09-02 23:49:22 +02003410 }
3411}
3412
3413static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3414 struct mlxsw_sp_nexthop *nh,
3415 struct fib_nh *fib_nh)
3416{
Petr Machatad97cda52017-11-28 13:17:13 +01003417 const struct mlxsw_sp_ipip_ops *ipip_ops;
Petr Machata35225e42017-09-02 23:49:22 +02003418 struct net_device *dev = fib_nh->nh_dev;
Petr Machatad97cda52017-11-28 13:17:13 +01003419 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02003420 struct mlxsw_sp_rif *rif;
3421 int err;
3422
Petr Machatad97cda52017-11-28 13:17:13 +01003423 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3424 if (ipip_entry) {
3425 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3426 if (ipip_ops->can_offload(mlxsw_sp, dev,
3427 MLXSW_SP_L3_PROTO_IPV4)) {
3428 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3429 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3430 return 0;
3431 }
Petr Machata1012b9a2017-09-02 23:49:23 +02003432 }
3433
Petr Machata35225e42017-09-02 23:49:22 +02003434 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3435 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3436 if (!rif)
3437 return 0;
3438
3439 mlxsw_sp_nexthop_rif_init(nh, rif);
3440 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3441 if (err)
3442 goto err_neigh_init;
3443
3444 return 0;
3445
3446err_neigh_init:
3447 mlxsw_sp_nexthop_rif_fini(nh);
3448 return err;
3449}
3450
3451static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3452 struct mlxsw_sp_nexthop *nh)
3453{
3454 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3455}
3456
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003457static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3458 struct mlxsw_sp_nexthop_group *nh_grp,
3459 struct mlxsw_sp_nexthop *nh,
3460 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003461{
3462 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003463 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003464 int err;
3465
3466 nh->nh_grp = nh_grp;
3467 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003468#ifdef CONFIG_IP_ROUTE_MULTIPATH
3469 nh->nh_weight = fib_nh->nh_weight;
3470#else
3471 nh->nh_weight = 1;
3472#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003473 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003474 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3475 if (err)
3476 return err;
3477
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003478 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003479 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3480
Ido Schimmel97989ee2017-03-10 08:53:38 +01003481 if (!dev)
3482 return 0;
3483
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003484 in_dev = __in_dev_get_rtnl(dev);
3485 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3486 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3487 return 0;
3488
Petr Machata35225e42017-09-02 23:49:22 +02003489 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003490 if (err)
3491 goto err_nexthop_neigh_init;
3492
3493 return 0;
3494
3495err_nexthop_neigh_init:
3496 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3497 return err;
3498}
3499
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003500static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3501 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003502{
Petr Machata35225e42017-09-02 23:49:22 +02003503 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003504 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003505 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003506 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003507}
3508
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003509static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3510 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003511{
3512 struct mlxsw_sp_nexthop_key key;
3513 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003514
Ido Schimmel9011b672017-05-16 19:38:25 +02003515 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003516 return;
3517
3518 key.fib_nh = fib_nh;
3519 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3520 if (WARN_ON_ONCE(!nh))
3521 return;
3522
Ido Schimmelad178c82017-02-08 11:16:40 +01003523 switch (event) {
3524 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003525 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003526 break;
3527 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003528 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003529 break;
3530 }
3531
3532 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3533}
3534
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003535static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3536 struct mlxsw_sp_rif *rif)
3537{
3538 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003539 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003540
3541 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003542 switch (nh->type) {
3543 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3544 removing = false;
3545 break;
3546 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3547 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3548 break;
3549 default:
3550 WARN_ON(1);
3551 continue;
3552 }
3553
3554 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003555 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3556 }
3557}
3558
Petr Machata09dbf622017-11-28 13:17:14 +01003559static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
3560 struct mlxsw_sp_rif *old_rif,
3561 struct mlxsw_sp_rif *new_rif)
3562{
3563 struct mlxsw_sp_nexthop *nh;
3564
3565 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
3566 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
3567 nh->rif = new_rif;
3568 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
3569}
3570
Ido Schimmel9665b742017-02-08 11:16:42 +01003571static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003572 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003573{
3574 struct mlxsw_sp_nexthop *nh, *tmp;
3575
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003576 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003577 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003578 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3579 }
3580}
3581
Petr Machata9b014512017-09-02 23:49:20 +02003582static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3583 const struct fib_info *fi)
3584{
Petr Machata1012b9a2017-09-02 23:49:23 +02003585 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3586 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003587}
3588
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003589static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003590mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003591{
3592 struct mlxsw_sp_nexthop_group *nh_grp;
3593 struct mlxsw_sp_nexthop *nh;
3594 struct fib_nh *fib_nh;
3595 size_t alloc_size;
3596 int i;
3597 int err;
3598
3599 alloc_size = sizeof(*nh_grp) +
3600 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3601 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3602 if (!nh_grp)
3603 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003604 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003605 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003606 nh_grp->neigh_tbl = &arp_tbl;
3607
Petr Machata9b014512017-09-02 23:49:20 +02003608 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003609 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003610 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003611 for (i = 0; i < nh_grp->count; i++) {
3612 nh = &nh_grp->nexthops[i];
3613 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003614 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003615 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003616 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003617 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003618 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3619 if (err)
3620 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003621 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3622 return nh_grp;
3623
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003624err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003625err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003626 for (i--; i >= 0; i--) {
3627 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003628 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003629 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003630 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003631 kfree(nh_grp);
3632 return ERR_PTR(err);
3633}
3634
3635static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003636mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3637 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003638{
3639 struct mlxsw_sp_nexthop *nh;
3640 int i;
3641
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003642 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003643 for (i = 0; i < nh_grp->count; i++) {
3644 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003645 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003646 }
Ido Schimmel58312122016-12-23 09:32:50 +01003647 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3648 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003649 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003650 kfree(nh_grp);
3651}
3652
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003653static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3654 struct mlxsw_sp_fib_entry *fib_entry,
3655 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003656{
3657 struct mlxsw_sp_nexthop_group *nh_grp;
3658
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003659 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003660 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003661 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003662 if (IS_ERR(nh_grp))
3663 return PTR_ERR(nh_grp);
3664 }
3665 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3666 fib_entry->nh_group = nh_grp;
3667 return 0;
3668}
3669
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003670static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3671 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003672{
3673 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3674
3675 list_del(&fib_entry->nexthop_group_node);
3676 if (!list_empty(&nh_grp->fib_list))
3677 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003678 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003679}
3680
Ido Schimmel013b20f2017-02-08 11:16:36 +01003681static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003682mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3683{
3684 struct mlxsw_sp_fib4_entry *fib4_entry;
3685
3686 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3687 common);
3688 return !fib4_entry->tos;
3689}
3690
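/* Only TOS 0 IPv4 routes are considered for offload (see
 * mlxsw_sp_fib4_entry_should_offload() above); routes with a non-zero
 * TOS are left to the kernel, presumably because the device's LPM lookup
 * does not key on TOS.
 */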
3691static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003692mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3693{
3694 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3695
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003696 switch (fib_entry->fib_node->fib->proto) {
3697 case MLXSW_SP_L3_PROTO_IPV4:
3698 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3699 return false;
3700 break;
3701 case MLXSW_SP_L3_PROTO_IPV6:
3702 break;
3703 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003704
Ido Schimmel013b20f2017-02-08 11:16:36 +01003705 switch (fib_entry->type) {
3706 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3707 return !!nh_group->adj_index_valid;
3708 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003709 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003710 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3711 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003712 default:
3713 return false;
3714 }
3715}
3716
Ido Schimmel428b8512017-08-03 13:28:28 +02003717static struct mlxsw_sp_nexthop *
3718mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3719 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3720{
3721 int i;
3722
3723 for (i = 0; i < nh_grp->count; i++) {
3724 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3725 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3726
3727 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3728 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3729 &rt->rt6i_gateway))
3730 return nh;
3731 continue;
3732 }
3733
3734 return NULL;
3735}
3736
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003737static void
3738mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3739{
3740 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3741 int i;
3742
Petr Machata4607f6d2017-09-02 23:49:25 +02003743 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3744 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003745 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3746 return;
3747 }
3748
3749 for (i = 0; i < nh_grp->count; i++) {
3750 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3751
3752 if (nh->offloaded)
3753 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3754 else
3755 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3756 }
3757}
3758
3759static void
3760mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3761{
3762 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3763 int i;
3764
3765 for (i = 0; i < nh_grp->count; i++) {
3766 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3767
3768 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3769 }
3770}
3771
Ido Schimmel428b8512017-08-03 13:28:28 +02003772static void
3773mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3774{
3775 struct mlxsw_sp_fib6_entry *fib6_entry;
3776 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3777
3778 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3779 common);
3780
3781 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3782 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003783 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003784 return;
3785 }
3786
3787 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3788 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3789 struct mlxsw_sp_nexthop *nh;
3790
3791 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3792 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003793 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003794 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003795 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003796 }
3797}
3798
3799static void
3800mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3801{
3802 struct mlxsw_sp_fib6_entry *fib6_entry;
3803 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3804
3805 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3806 common);
3807 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3808 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3809
Ido Schimmelfe400792017-08-15 09:09:49 +02003810 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003811 }
3812}
3813
Ido Schimmel013b20f2017-02-08 11:16:36 +01003814static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3815{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003816 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003817 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003818 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003819 break;
3820 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003821 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3822 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003823 }
3824}
3825
3826static void
3827mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3828{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003829 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003830 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003831 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003832 break;
3833 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003834 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3835 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003836 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003837}
3838
3839static void
3840mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3841 enum mlxsw_reg_ralue_op op, int err)
3842{
3843 switch (op) {
3844 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003845 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3846 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3847 if (err)
3848 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003849 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003850 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003851 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003852 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3853 return;
3854 default:
3855 return;
3856 }
3857}
3858
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003859static void
3860mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3861 const struct mlxsw_sp_fib_entry *fib_entry,
3862 enum mlxsw_reg_ralue_op op)
3863{
3864 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3865 enum mlxsw_reg_ralxx_protocol proto;
3866 u32 *p_dip;
3867
3868 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3869
3870 switch (fib->proto) {
3871 case MLXSW_SP_L3_PROTO_IPV4:
3872 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3873 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3874 fib_entry->fib_node->key.prefix_len,
3875 *p_dip);
3876 break;
3877 case MLXSW_SP_L3_PROTO_IPV6:
3878 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3879 fib_entry->fib_node->key.prefix_len,
3880 fib_entry->fib_node->key.addr);
3881 break;
3882 }
3883}
3884
3885static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3886 struct mlxsw_sp_fib_entry *fib_entry,
3887 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003888{
3889 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003890 enum mlxsw_reg_ralue_trap_action trap_action;
3891 u16 trap_id = 0;
3892 u32 adjacency_index = 0;
3893 u16 ecmp_size = 0;
3894
3895 /* In case the nexthop group adjacency index is valid, use it
3896	 * with the provided ECMP size. Otherwise, set up a trap and pass
3897	 * traffic to the kernel.
3898 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003899 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003900 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3901 adjacency_index = fib_entry->nh_group->adj_index;
3902 ecmp_size = fib_entry->nh_group->ecmp_size;
3903 } else {
3904 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3905 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3906 }
3907
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003908 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003909 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3910 adjacency_index, ecmp_size);
3911 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3912}
3913
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003914static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3915 struct mlxsw_sp_fib_entry *fib_entry,
3916 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003917{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003918 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003919 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003920 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003921 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003922 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003923
3924 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3925 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003926 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003927 } else {
3928 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3929 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3930 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003931
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003932 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003933 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3934 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003935 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3936}
3937
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003938static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3939 struct mlxsw_sp_fib_entry *fib_entry,
3940 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003941{
3942 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003943
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003944 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003945 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3946 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3947}
3948
Petr Machata4607f6d2017-09-02 23:49:25 +02003949static int
3950mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3951 struct mlxsw_sp_fib_entry *fib_entry,
3952 enum mlxsw_reg_ralue_op op)
3953{
3954 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3955 const struct mlxsw_sp_ipip_ops *ipip_ops;
3956
3957 if (WARN_ON(!ipip_entry))
3958 return -EINVAL;
3959
3960 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3961 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3962 fib_entry->decap.tunnel_index);
3963}
3964
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003965static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3966 struct mlxsw_sp_fib_entry *fib_entry,
3967 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003968{
3969 switch (fib_entry->type) {
3970 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003971 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003972 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003973 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003974 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003975 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003976 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3977 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3978 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003979 }
3980 return -EINVAL;
3981}
3982
3983static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3984 struct mlxsw_sp_fib_entry *fib_entry,
3985 enum mlxsw_reg_ralue_op op)
3986{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003987 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003988
Ido Schimmel013b20f2017-02-08 11:16:36 +01003989 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003990
Ido Schimmel013b20f2017-02-08 11:16:36 +01003991 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003992}
3993
3994static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3995 struct mlxsw_sp_fib_entry *fib_entry)
3996{
Jiri Pirko7146da32016-09-01 10:37:41 +02003997 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3998 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003999}
4000
4001static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4002 struct mlxsw_sp_fib_entry *fib_entry)
4003{
4004 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4005 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4006}
4007
Jiri Pirko61c503f2016-07-04 08:23:11 +02004008static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004009mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4010 const struct fib_entry_notifier_info *fen_info,
4011 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004012{
Petr Machata4607f6d2017-09-02 23:49:25 +02004013 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4014 struct net_device *dev = fen_info->fi->fib_dev;
4015 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004016 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004017
Ido Schimmel97989ee2017-03-10 08:53:38 +01004018 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004019 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004020 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4021 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004022 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004023 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4024 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4025 fib_entry,
4026 ipip_entry);
4027 }
4028 /* fall through */
4029 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004030 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4031 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004032 case RTN_UNREACHABLE: /* fall through */
4033 case RTN_BLACKHOLE: /* fall through */
4034 case RTN_PROHIBIT:
4035 /* Packets hitting these routes need to be trapped, but
4036 * can do so with a lower priority than packets directed
4037 * at the host, so use action type local instead of trap.
4038 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004039 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004040 return 0;
4041 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004042 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004043 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004044 else
4045 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004046 return 0;
4047 default:
4048 return -EINVAL;
4049 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004050}
4051
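/* Mapping performed above: an RTN_LOCAL route whose destination matches
 * the underlay address of an IP-in-IP tunnel with an UP overlay device
 * becomes an IPIP_DECAP entry; remaining RTN_LOCAL and RTN_BROADCAST
 * routes trap to the CPU; unreachable, blackhole and prohibit routes
 * also go to the CPU but with local (lower) priority; RTN_UNICAST routes
 * are REMOTE when they have a usable gateway and LOCAL otherwise.
 */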
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004052static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004053mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4054 struct mlxsw_sp_fib_node *fib_node,
4055 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004056{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004057 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004058 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004059 int err;
4060
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004061 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4062 if (!fib4_entry)
4063 return ERR_PTR(-ENOMEM);
4064 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004065
4066 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4067 if (err)
4068 goto err_fib4_entry_type_set;
4069
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004070 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004071 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004072 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004073
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004074 fib4_entry->prio = fen_info->fi->fib_priority;
4075 fib4_entry->tb_id = fen_info->tb_id;
4076 fib4_entry->type = fen_info->type;
4077 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004078
4079 fib_entry->fib_node = fib_node;
4080
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004081 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004082
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004083err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004084err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004085 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004086 return ERR_PTR(err);
4087}
4088
4089static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004090 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004091{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004092 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004093 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004094}
4095
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004096static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004097mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4098 const struct fib_entry_notifier_info *fen_info)
4099{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004100 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004101 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004102 struct mlxsw_sp_fib *fib;
4103 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004104
Ido Schimmel160e22a2017-07-18 10:10:20 +02004105 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4106 if (!vr)
4107 return NULL;
4108 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4109
4110 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4111 sizeof(fen_info->dst),
4112 fen_info->dst_len);
4113 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004114 return NULL;
4115
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004116 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4117 if (fib4_entry->tb_id == fen_info->tb_id &&
4118 fib4_entry->tos == fen_info->tos &&
4119 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004120 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4121 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004122 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004123 }
4124 }
4125
4126 return NULL;
4127}
4128
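/* FIB nodes are hashed by destination prefix and prefix length within a
 * single FIB instance (one per virtual router and protocol).
 */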
4129static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4130 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4131 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4132 .key_len = sizeof(struct mlxsw_sp_fib_key),
4133 .automatic_shrinking = true,
4134};
4135
4136static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4137 struct mlxsw_sp_fib_node *fib_node)
4138{
4139 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4140 mlxsw_sp_fib_ht_params);
4141}
4142
4143static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4144 struct mlxsw_sp_fib_node *fib_node)
4145{
4146 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4147 mlxsw_sp_fib_ht_params);
4148}
4149
4150static struct mlxsw_sp_fib_node *
4151mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4152 size_t addr_len, unsigned char prefix_len)
4153{
4154 struct mlxsw_sp_fib_key key;
4155
4156 memset(&key, 0, sizeof(key));
4157 memcpy(key.addr, addr, addr_len);
4158 key.prefix_len = prefix_len;
4159 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4160}
4161
4162static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004163mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004164 size_t addr_len, unsigned char prefix_len)
4165{
4166 struct mlxsw_sp_fib_node *fib_node;
4167
4168 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4169 if (!fib_node)
4170 return NULL;
4171
4172 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004173 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004174 memcpy(fib_node->key.addr, addr, addr_len);
4175 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004176
4177 return fib_node;
4178}
4179
4180static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4181{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004182 list_del(&fib_node->list);
4183 WARN_ON(!list_empty(&fib_node->entry_list));
4184 kfree(fib_node);
4185}
4186
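/* Only the first entry in a FIB node's entry list is programmed to the
 * device; the following entries serve as backups that are promoted when
 * the first one is deleted (see mlxsw_sp_fib_node_entry_add() and
 * mlxsw_sp_fib_node_entry_del()).
 */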
4187static bool
4188mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4189 const struct mlxsw_sp_fib_entry *fib_entry)
4190{
4191 return list_first_entry(&fib_node->entry_list,
4192 struct mlxsw_sp_fib_entry, list) == fib_entry;
4193}
4194
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004195static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004196 struct mlxsw_sp_fib_node *fib_node)
4197{
4198 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004199 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004200 struct mlxsw_sp_lpm_tree *lpm_tree;
4201 int err;
4202
4203 /* Since the tree is shared between all virtual routers we must
4204 * make sure it contains all the required prefix lengths. This
4205 * can be computed by either adding the new prefix length to the
4206 * existing prefix usage of a bound tree, or by aggregating the
4207 * prefix lengths across all virtual routers and adding the new
4208 * one as well.
4209 */
4210 if (fib->lpm_tree)
4211 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
4212 &fib->lpm_tree->prefix_usage);
4213 else
4214 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
4215 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4216
4217 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4218 fib->proto);
4219 if (IS_ERR(lpm_tree))
4220 return PTR_ERR(lpm_tree);
4221
4222 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
4223 return 0;
4224
4225 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4226 if (err)
4227 return err;
4228
4229 return 0;
4230}
4231
4232static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004233 struct mlxsw_sp_fib_node *fib_node)
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004234{
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004235 struct mlxsw_sp_fib *fib = fib_node->fib;
4236
Ido Schimmel4fd00312018-01-22 09:17:40 +01004237 if (!list_is_singular(&fib->node_list))
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004238 return;
Ido Schimmel4fd00312018-01-22 09:17:40 +01004239	/* The last node is being unlinked from the FIB. Unbind the
 4240	 * tree and drop the reference.
4241 */
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004242 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
4243 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
4244 fib->lpm_tree = NULL;
4245}
4246
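/* Reference count the prefix lengths in use by a FIB, so that the prefix
 * usage advertised to the LPM tree code stays in sync with the installed
 * nodes.
 */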
Ido Schimmel9aecce12017-02-09 10:28:42 +01004247static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
4248{
4249 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004250 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004251
4252 if (fib->prefix_ref_count[prefix_len]++ == 0)
4253 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4254}
4255
4256static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4257{
4258 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004259 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004260
4261 if (--fib->prefix_ref_count[prefix_len] == 0)
4262 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4263}
4264
Ido Schimmel76610eb2017-03-10 08:53:41 +01004265static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4266 struct mlxsw_sp_fib_node *fib_node,
4267 struct mlxsw_sp_fib *fib)
4268{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004269 int err;
4270
4271 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4272 if (err)
4273 return err;
4274 fib_node->fib = fib;
4275
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004276 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004277 if (err)
4278 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004279
4280 mlxsw_sp_fib_node_prefix_inc(fib_node);
4281
4282 return 0;
4283
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004284err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004285 fib_node->fib = NULL;
4286 mlxsw_sp_fib_node_remove(fib, fib_node);
4287 return err;
4288}
4289
4290static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4291 struct mlxsw_sp_fib_node *fib_node)
4292{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004293 struct mlxsw_sp_fib *fib = fib_node->fib;
4294
4295 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmel3aad95d2018-01-22 09:17:41 +01004296 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004297 fib_node->fib = NULL;
4298 mlxsw_sp_fib_node_remove(fib, fib_node);
4299}
4300
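/* Get the FIB node for the given prefix, creating it in the relevant
 * virtual router if it does not exist yet. mlxsw_sp_fib_node_put()
 * disposes of the node once its entry list becomes empty.
 */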
Ido Schimmel9aecce12017-02-09 10:28:42 +01004301static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004302mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4303 size_t addr_len, unsigned char prefix_len,
4304 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004305{
4306 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004307 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004308 struct mlxsw_sp_vr *vr;
4309 int err;
4310
David Ahernf8fa9b42017-10-18 09:56:56 -07004311 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004312 if (IS_ERR(vr))
4313 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004314 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004315
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004316 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004317 if (fib_node)
4318 return fib_node;
4319
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004320 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004321 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004322 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004323 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004324 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004325
Ido Schimmel76610eb2017-03-10 08:53:41 +01004326 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4327 if (err)
4328 goto err_fib_node_init;
4329
Ido Schimmel9aecce12017-02-09 10:28:42 +01004330 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004331
Ido Schimmel76610eb2017-03-10 08:53:41 +01004332err_fib_node_init:
4333 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004334err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004335 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004336 return ERR_PTR(err);
4337}
4338
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004339static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4340 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004341{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004342 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004343
Ido Schimmel9aecce12017-02-09 10:28:42 +01004344 if (!list_empty(&fib_node->entry_list))
4345 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004346 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004347 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004348 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004349}
4350
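/* FIB4 entries within a node are kept sorted by decreasing table ID, then
 * decreasing TOS and finally increasing priority. Return the entry before
 * which a new entry with the given parameters should be inserted, or NULL
 * if it should be placed based on its table ID alone.
 */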
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004351static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004352mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004353 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004354{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004355 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004356
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004357 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4358 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004359 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004360 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004361 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004362 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004363 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004364 if (fib4_entry->prio >= new4_entry->prio ||
4365 fib4_entry->tos < new4_entry->tos)
4366 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004367 }
4368
4369 return NULL;
4370}
4371
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004372static int
4373mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4374 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004375{
4376 struct mlxsw_sp_fib_node *fib_node;
4377
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004378 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004379 return -EINVAL;
4380
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004381 fib_node = fib4_entry->common.fib_node;
4382 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4383 common.list) {
4384 if (fib4_entry->tb_id != new4_entry->tb_id ||
4385 fib4_entry->tos != new4_entry->tos ||
4386 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004387 break;
4388 }
4389
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004390 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004391 return 0;
4392}
4393
Ido Schimmel9aecce12017-02-09 10:28:42 +01004394static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004395mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004396 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004397{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004398 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004399 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004400
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004401 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004402
Ido Schimmel4283bce2017-02-09 10:28:43 +01004403 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004404 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4405 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004406 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004407
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004408	/* Insert the new entry before the replaced one, so that we can
 4409	 * later remove the replaced entry.
4410 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004411 if (fib4_entry) {
4412 list_add_tail(&new4_entry->common.list,
4413 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004414 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004415 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004416
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004417 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4418 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004419 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004420 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004421 }
4422
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004423 if (fib4_entry)
4424 list_add(&new4_entry->common.list,
4425 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004426 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004427 list_add(&new4_entry->common.list,
4428 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004429 }
4430
4431 return 0;
4432}
4433
4434static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004435mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004436{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004437 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004438}
4439
Ido Schimmel80c238f2017-07-18 10:10:29 +02004440static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4441 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004442{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004443 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4444
Ido Schimmel9aecce12017-02-09 10:28:42 +01004445 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4446 return 0;
4447
4448 /* To prevent packet loss, overwrite the previously offloaded
4449 * entry.
4450 */
4451 if (!list_is_singular(&fib_node->entry_list)) {
4452 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4453 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4454
4455 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4456 }
4457
4458 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4459}
4460
Ido Schimmel80c238f2017-07-18 10:10:29 +02004461static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4462 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004463{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004464 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4465
Ido Schimmel9aecce12017-02-09 10:28:42 +01004466 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4467 return;
4468
4469 /* Promote the next entry by overwriting the deleted entry */
4470 if (!list_is_singular(&fib_node->entry_list)) {
4471 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4472 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4473
4474 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4475 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4476 return;
4477 }
4478
4479 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4480}
4481
4482static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004483 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004484 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004485{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004486 int err;
4487
Ido Schimmel9efbee62017-07-18 10:10:28 +02004488 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004489 if (err)
4490 return err;
4491
Ido Schimmel80c238f2017-07-18 10:10:29 +02004492 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004493 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004494 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004495
Ido Schimmel9aecce12017-02-09 10:28:42 +01004496 return 0;
4497
Ido Schimmel80c238f2017-07-18 10:10:29 +02004498err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004499 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004500 return err;
4501}
4502
4503static void
4504mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004505 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004506{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004507 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004508 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004509
4510 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4511 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004512}
4513
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004514static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004515 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004516 bool replace)
4517{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004518 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4519 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004520
4521 if (!replace)
4522 return;
4523
4524	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004525 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004526
4527 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4528 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004529 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004530}
4531
Ido Schimmel9aecce12017-02-09 10:28:42 +01004532static int
4533mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004534 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004535 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004536{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004537 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004538 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004539 int err;
4540
Ido Schimmel9011b672017-05-16 19:38:25 +02004541 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004542 return 0;
4543
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004544 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4545 &fen_info->dst, sizeof(fen_info->dst),
4546 fen_info->dst_len,
4547 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004548 if (IS_ERR(fib_node)) {
4549 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4550 return PTR_ERR(fib_node);
4551 }
4552
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004553 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4554 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004555 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004556 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004557 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004558 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004559
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004560 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004561 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004562 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004563 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4564 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004565 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004566
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004567 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004568
Jiri Pirko61c503f2016-07-04 08:23:11 +02004569 return 0;
4570
Ido Schimmel9aecce12017-02-09 10:28:42 +01004571err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004572 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004573err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004574 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004575 return err;
4576}
4577
Jiri Pirko37956d72016-10-20 16:05:43 +02004578static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4579 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004580{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004581 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004582 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004583
Ido Schimmel9011b672017-05-16 19:38:25 +02004584 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004585 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004586
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004587 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4588 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004589 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004590 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004591
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004592 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4593 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004594 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004595}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004596
Ido Schimmel428b8512017-08-03 13:28:28 +02004597static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4598{
4599	/* Packets with a link-local destination IP arriving at the router
 4600	 * are trapped to the CPU, so there is no need to program specific
 4601	 * routes for them.
4602 */
4603 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4604 return true;
4605
4606 /* Multicast routes aren't supported, so ignore them. Neighbour
4607 * Discovery packets are specifically trapped.
4608 */
4609 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4610 return true;
4611
4612 /* Cloned routes are irrelevant in the forwarding path. */
4613 if (rt->rt6i_flags & RTF_CACHE)
4614 return true;
4615
4616 return false;
4617}
4618
4619static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4620{
4621 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4622
4623 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4624 if (!mlxsw_sp_rt6)
4625 return ERR_PTR(-ENOMEM);
4626
4627	/* In case of route replace, the replaced route is deleted with
 4628	 * no notification. Take a reference to prevent accessing freed
 4629	 * memory.
4630 */
4631 mlxsw_sp_rt6->rt = rt;
4632 rt6_hold(rt);
4633
4634 return mlxsw_sp_rt6;
4635}
4636
4637#if IS_ENABLED(CONFIG_IPV6)
4638static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4639{
4640 rt6_release(rt);
4641}
4642#else
4643static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4644{
4645}
4646#endif
4647
4648static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4649{
4650 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4651 kfree(mlxsw_sp_rt6);
4652}
4653
4654static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4655{
4656 /* RTF_CACHE routes are ignored */
4657 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4658}
4659
4660static struct rt6_info *
4661mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4662{
4663 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4664 list)->rt;
4665}
4666
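/* Look for an existing FIB6 entry that the new route can be appended to
 * as another nexthop. Only gateway routes (excluding RTF_ADDRCONF ones)
 * in the same table and with the same metric can share a multipath entry.
 * In case of replace, a new entry is always created.
 */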
4667static struct mlxsw_sp_fib6_entry *
4668mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004669 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004670{
4671 struct mlxsw_sp_fib6_entry *fib6_entry;
4672
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004673 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004674 return NULL;
4675
4676 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4677 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4678
4679 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4680 * virtual router.
4681 */
4682 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4683 continue;
4684 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4685 break;
4686 if (rt->rt6i_metric < nrt->rt6i_metric)
4687 continue;
4688 if (rt->rt6i_metric == nrt->rt6i_metric &&
4689 mlxsw_sp_fib6_rt_can_mp(rt))
4690 return fib6_entry;
4691 if (rt->rt6i_metric > nrt->rt6i_metric)
4692 break;
4693 }
4694
4695 return NULL;
4696}
4697
4698static struct mlxsw_sp_rt6 *
4699mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4700 const struct rt6_info *rt)
4701{
4702 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4703
4704 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4705 if (mlxsw_sp_rt6->rt == rt)
4706 return mlxsw_sp_rt6;
4707 }
4708
4709 return NULL;
4710}
4711
Petr Machata8f28a302017-09-02 23:49:24 +02004712static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4713 const struct rt6_info *rt,
4714 enum mlxsw_sp_ipip_type *ret)
4715{
4716 return rt->dst.dev &&
4717 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4718}
4719
Petr Machata35225e42017-09-02 23:49:22 +02004720static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4721 struct mlxsw_sp_nexthop_group *nh_grp,
4722 struct mlxsw_sp_nexthop *nh,
4723 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004724{
Petr Machatad97cda52017-11-28 13:17:13 +01004725 const struct mlxsw_sp_ipip_ops *ipip_ops;
4726 struct mlxsw_sp_ipip_entry *ipip_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004727 struct net_device *dev = rt->dst.dev;
4728 struct mlxsw_sp_rif *rif;
4729 int err;
4730
Petr Machatad97cda52017-11-28 13:17:13 +01004731 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4732 if (ipip_entry) {
4733 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4734 if (ipip_ops->can_offload(mlxsw_sp, dev,
4735 MLXSW_SP_L3_PROTO_IPV6)) {
4736 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4737 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4738 return 0;
4739 }
Petr Machata8f28a302017-09-02 23:49:24 +02004740 }
4741
Petr Machata35225e42017-09-02 23:49:22 +02004742 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004743 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4744 if (!rif)
4745 return 0;
4746 mlxsw_sp_nexthop_rif_init(nh, rif);
4747
4748 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4749 if (err)
4750 goto err_nexthop_neigh_init;
4751
4752 return 0;
4753
4754err_nexthop_neigh_init:
4755 mlxsw_sp_nexthop_rif_fini(nh);
4756 return err;
4757}
4758
Petr Machata35225e42017-09-02 23:49:22 +02004759static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4760 struct mlxsw_sp_nexthop *nh)
4761{
4762 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4763}
4764
4765static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4766 struct mlxsw_sp_nexthop_group *nh_grp,
4767 struct mlxsw_sp_nexthop *nh,
4768 const struct rt6_info *rt)
4769{
4770 struct net_device *dev = rt->dst.dev;
4771
4772 nh->nh_grp = nh_grp;
Ido Schimmel3743d882018-01-12 17:15:59 +01004773 nh->nh_weight = rt->rt6i_nh_weight;
Petr Machata35225e42017-09-02 23:49:22 +02004774 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004775 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004776
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004777 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4778
Petr Machata35225e42017-09-02 23:49:22 +02004779 if (!dev)
4780 return 0;
4781 nh->ifindex = dev->ifindex;
4782
4783 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4784}
4785
Ido Schimmel428b8512017-08-03 13:28:28 +02004786static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4787 struct mlxsw_sp_nexthop *nh)
4788{
Petr Machata35225e42017-09-02 23:49:22 +02004789 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004790 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004791 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004792}
4793
Petr Machataf6050ee2017-09-02 23:49:21 +02004794static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4795 const struct rt6_info *rt)
4796{
Petr Machata8f28a302017-09-02 23:49:24 +02004797 return rt->rt6i_flags & RTF_GATEWAY ||
4798 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004799}
4800
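/* Create a nexthop group for an IPv6 entry, with one nexthop per sibling
 * route currently linked to the entry. The group is inserted into the
 * nexthop group hash table so it can be shared with identical entries.
 */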
Ido Schimmel428b8512017-08-03 13:28:28 +02004801static struct mlxsw_sp_nexthop_group *
4802mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4803 struct mlxsw_sp_fib6_entry *fib6_entry)
4804{
4805 struct mlxsw_sp_nexthop_group *nh_grp;
4806 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4807 struct mlxsw_sp_nexthop *nh;
4808 size_t alloc_size;
4809 int i = 0;
4810 int err;
4811
4812 alloc_size = sizeof(*nh_grp) +
4813 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4814 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4815 if (!nh_grp)
4816 return ERR_PTR(-ENOMEM);
4817 INIT_LIST_HEAD(&nh_grp->fib_list);
4818#if IS_ENABLED(CONFIG_IPV6)
4819 nh_grp->neigh_tbl = &nd_tbl;
4820#endif
4821 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4822 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004823 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004824 nh_grp->count = fib6_entry->nrt6;
4825 for (i = 0; i < nh_grp->count; i++) {
4826 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4827
4828 nh = &nh_grp->nexthops[i];
4829 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4830 if (err)
4831 goto err_nexthop6_init;
4832 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4833 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004834
4835 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4836 if (err)
4837 goto err_nexthop_group_insert;
4838
Ido Schimmel428b8512017-08-03 13:28:28 +02004839 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4840 return nh_grp;
4841
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004842err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004843err_nexthop6_init:
4844 for (i--; i >= 0; i--) {
4845 nh = &nh_grp->nexthops[i];
4846 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4847 }
4848 kfree(nh_grp);
4849 return ERR_PTR(err);
4850}
4851
4852static void
4853mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4854 struct mlxsw_sp_nexthop_group *nh_grp)
4855{
4856 struct mlxsw_sp_nexthop *nh;
4857 int i = nh_grp->count;
4858
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004859 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004860 for (i--; i >= 0; i--) {
4861 nh = &nh_grp->nexthops[i];
4862 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4863 }
4864 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4865 WARN_ON(nh_grp->adj_index_valid);
4866 kfree(nh_grp);
4867}
4868
4869static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4870 struct mlxsw_sp_fib6_entry *fib6_entry)
4871{
4872 struct mlxsw_sp_nexthop_group *nh_grp;
4873
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004874 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4875 if (!nh_grp) {
4876 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4877 if (IS_ERR(nh_grp))
4878 return PTR_ERR(nh_grp);
4879 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004880
4881 list_add_tail(&fib6_entry->common.nexthop_group_node,
4882 &nh_grp->fib_list);
4883 fib6_entry->common.nh_group = nh_grp;
4884
4885 return 0;
4886}
4887
4888static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4889 struct mlxsw_sp_fib_entry *fib_entry)
4890{
4891 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4892
4893 list_del(&fib_entry->nexthop_group_node);
4894 if (!list_empty(&nh_grp->fib_list))
4895 return;
4896 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4897}
4898
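/* Called when the set of routes backing a FIB6 entry changes: switch the
 * entry to a nexthop group matching the new route list, update the
 * device's table accordingly and release the old group if this entry was
 * its last user.
 */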
4899static int
4900mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4901 struct mlxsw_sp_fib6_entry *fib6_entry)
4902{
4903 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4904 int err;
4905
4906 fib6_entry->common.nh_group = NULL;
4907 list_del(&fib6_entry->common.nexthop_group_node);
4908
4909 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4910 if (err)
4911 goto err_nexthop6_group_get;
4912
4913	/* If this entry is offloaded, the adjacency index currently
 4914	 * associated with it in the device's table is that of the old
 4915	 * group. Start using the new one instead.
4916 */
4917 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4918 if (err)
4919 goto err_fib_node_entry_add;
4920
4921 if (list_empty(&old_nh_grp->fib_list))
4922 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4923
4924 return 0;
4925
4926err_fib_node_entry_add:
4927 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4928err_nexthop6_group_get:
4929 list_add_tail(&fib6_entry->common.nexthop_group_node,
4930 &old_nh_grp->fib_list);
4931 fib6_entry->common.nh_group = old_nh_grp;
4932 return err;
4933}
4934
4935static int
4936mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4937 struct mlxsw_sp_fib6_entry *fib6_entry,
4938 struct rt6_info *rt)
4939{
4940 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4941 int err;
4942
4943 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4944 if (IS_ERR(mlxsw_sp_rt6))
4945 return PTR_ERR(mlxsw_sp_rt6);
4946
4947 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4948 fib6_entry->nrt6++;
4949
4950 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4951 if (err)
4952 goto err_nexthop6_group_update;
4953
4954 return 0;
4955
4956err_nexthop6_group_update:
4957 fib6_entry->nrt6--;
4958 list_del(&mlxsw_sp_rt6->list);
4959 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4960 return err;
4961}
4962
4963static void
4964mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4965 struct mlxsw_sp_fib6_entry *fib6_entry,
4966 struct rt6_info *rt)
4967{
4968 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4969
4970 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4971 if (WARN_ON(!mlxsw_sp_rt6))
4972 return;
4973
4974 fib6_entry->nrt6--;
4975 list_del(&mlxsw_sp_rt6->list);
4976 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4977 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4978}
4979
Petr Machataf6050ee2017-09-02 23:49:21 +02004980static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4981 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004982 const struct rt6_info *rt)
4983{
4984 /* Packets hitting RTF_REJECT routes need to be discarded by the
4985 * stack. We can rely on their destination device not having a
4986 * RIF (it's the loopback device) and can thus use action type
4987 * local, which will cause them to be trapped with a lower
4988 * priority than packets that need to be locally received.
4989 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004990 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004991 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4992 else if (rt->rt6i_flags & RTF_REJECT)
4993 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004994 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004995 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4996 else
4997 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4998}
4999
5000static void
5001mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5002{
5003 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5004
5005 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5006 list) {
5007 fib6_entry->nrt6--;
5008 list_del(&mlxsw_sp_rt6->list);
5009 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5010 }
5011}
5012
5013static struct mlxsw_sp_fib6_entry *
5014mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5015 struct mlxsw_sp_fib_node *fib_node,
5016 struct rt6_info *rt)
5017{
5018 struct mlxsw_sp_fib6_entry *fib6_entry;
5019 struct mlxsw_sp_fib_entry *fib_entry;
5020 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5021 int err;
5022
5023 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5024 if (!fib6_entry)
5025 return ERR_PTR(-ENOMEM);
5026 fib_entry = &fib6_entry->common;
5027
5028 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5029 if (IS_ERR(mlxsw_sp_rt6)) {
5030 err = PTR_ERR(mlxsw_sp_rt6);
5031 goto err_rt6_create;
5032 }
5033
Petr Machataf6050ee2017-09-02 23:49:21 +02005034 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005035
5036 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5037 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5038 fib6_entry->nrt6 = 1;
5039 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5040 if (err)
5041 goto err_nexthop6_group_get;
5042
5043 fib_entry->fib_node = fib_node;
5044
5045 return fib6_entry;
5046
5047err_nexthop6_group_get:
5048 list_del(&mlxsw_sp_rt6->list);
5049 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5050err_rt6_create:
5051 kfree(fib6_entry);
5052 return ERR_PTR(err);
5053}
5054
5055static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5056 struct mlxsw_sp_fib6_entry *fib6_entry)
5057{
5058 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5059 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5060 WARN_ON(fib6_entry->nrt6);
5061 kfree(fib6_entry);
5062}
5063
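/* Find the entry before which the new one should be inserted. Entries in
 * a node are ordered by decreasing table ID and increasing metric. In
 * case of replace, try to return the entry that is actually being
 * replaced: one with the same metric, preferably with a matching
 * multipath capability.
 */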
5064static struct mlxsw_sp_fib6_entry *
5065mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005066 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005067{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005068 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005069
5070 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5071 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5072
5073 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
5074 continue;
5075 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
5076 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005077 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
5078 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5079 mlxsw_sp_fib6_rt_can_mp(nrt))
5080 return fib6_entry;
5081 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5082 fallback = fallback ?: fib6_entry;
5083 }
Ido Schimmel428b8512017-08-03 13:28:28 +02005084 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005085 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005086 }
5087
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005088 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02005089}
5090
5091static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005092mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5093 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005094{
5095 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5096 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5097 struct mlxsw_sp_fib6_entry *fib6_entry;
5098
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005099 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5100
5101 if (replace && WARN_ON(!fib6_entry))
5102 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005103
5104 if (fib6_entry) {
5105 list_add_tail(&new6_entry->common.list,
5106 &fib6_entry->common.list);
5107 } else {
5108 struct mlxsw_sp_fib6_entry *last;
5109
5110 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5111 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5112
5113 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5114 break;
5115 fib6_entry = last;
5116 }
5117
5118 if (fib6_entry)
5119 list_add(&new6_entry->common.list,
5120 &fib6_entry->common.list);
5121 else
5122 list_add(&new6_entry->common.list,
5123 &fib_node->entry_list);
5124 }
5125
5126 return 0;
5127}
5128
5129static void
5130mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5131{
5132 list_del(&fib6_entry->common.list);
5133}
5134
5135static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005136 struct mlxsw_sp_fib6_entry *fib6_entry,
5137 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005138{
5139 int err;
5140
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005141 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005142 if (err)
5143 return err;
5144
5145 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5146 if (err)
5147 goto err_fib_node_entry_add;
5148
5149 return 0;
5150
5151err_fib_node_entry_add:
5152 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5153 return err;
5154}
5155
5156static void
5157mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5158 struct mlxsw_sp_fib6_entry *fib6_entry)
5159{
5160 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5161 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5162}
5163
5164static struct mlxsw_sp_fib6_entry *
5165mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5166 const struct rt6_info *rt)
5167{
5168 struct mlxsw_sp_fib6_entry *fib6_entry;
5169 struct mlxsw_sp_fib_node *fib_node;
5170 struct mlxsw_sp_fib *fib;
5171 struct mlxsw_sp_vr *vr;
5172
5173 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5174 if (!vr)
5175 return NULL;
5176 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5177
5178 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5179 sizeof(rt->rt6i_dst.addr),
5180 rt->rt6i_dst.plen);
5181 if (!fib_node)
5182 return NULL;
5183
5184 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5185 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5186
5187 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5188 rt->rt6i_metric == iter_rt->rt6i_metric &&
5189 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5190 return fib6_entry;
5191 }
5192
5193 return NULL;
5194}
5195
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005196static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5197 struct mlxsw_sp_fib6_entry *fib6_entry,
5198 bool replace)
5199{
5200 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5201 struct mlxsw_sp_fib6_entry *replaced;
5202
5203 if (!replace)
5204 return;
5205
5206 replaced = list_next_entry(fib6_entry, common.list);
5207
5208 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5209 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5210 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5211}
5212
Ido Schimmel428b8512017-08-03 13:28:28 +02005213static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005214 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005215{
5216 struct mlxsw_sp_fib6_entry *fib6_entry;
5217 struct mlxsw_sp_fib_node *fib_node;
5218 int err;
5219
5220 if (mlxsw_sp->router->aborted)
5221 return 0;
5222
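	/* Source-specific routes cannot be offloaded, so reject them here. */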
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005223 if (rt->rt6i_src.plen)
5224 return -EINVAL;
5225
Ido Schimmel428b8512017-08-03 13:28:28 +02005226 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5227 return 0;
5228
5229 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5230 &rt->rt6i_dst.addr,
5231 sizeof(rt->rt6i_dst.addr),
5232 rt->rt6i_dst.plen,
5233 MLXSW_SP_L3_PROTO_IPV6);
5234 if (IS_ERR(fib_node))
5235 return PTR_ERR(fib_node);
5236
5237	/* Before creating a new entry, try to append the route to an
 5238	 * existing multipath entry.
5239 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005240 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005241 if (fib6_entry) {
5242 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5243 if (err)
5244 goto err_fib6_entry_nexthop_add;
5245 return 0;
5246 }
5247
5248 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5249 if (IS_ERR(fib6_entry)) {
5250 err = PTR_ERR(fib6_entry);
5251 goto err_fib6_entry_create;
5252 }
5253
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005254 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005255 if (err)
5256 goto err_fib6_node_entry_link;
5257
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005258 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5259
Ido Schimmel428b8512017-08-03 13:28:28 +02005260 return 0;
5261
5262err_fib6_node_entry_link:
5263 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5264err_fib6_entry_create:
5265err_fib6_entry_nexthop_add:
5266 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5267 return err;
5268}
5269
5270static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5271 struct rt6_info *rt)
5272{
5273 struct mlxsw_sp_fib6_entry *fib6_entry;
5274 struct mlxsw_sp_fib_node *fib_node;
5275
5276 if (mlxsw_sp->router->aborted)
5277 return;
5278
5279 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5280 return;
5281
5282 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5283 if (WARN_ON(!fib6_entry))
5284 return;
5285
5286	/* If the route is part of a multipath entry, but is not the last
 5287	 * one to be removed, only shrink its nexthop group.
5288 */
5289 if (!list_is_singular(&fib6_entry->rt6_list)) {
5290 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5291 return;
5292 }
5293
5294 fib_node = fib6_entry->common.fib_node;
5295
5296 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5297 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5298 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5299}
5300
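/* Bind all virtual routers to a minimal LPM tree and install a default
 * (zero prefix length) route with an IP2ME action in each of them, so
 * that after a FIB abort all packets are trapped to the CPU and routed
 * by the kernel instead of the device.
 */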
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005301static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5302 enum mlxsw_reg_ralxx_protocol proto,
5303 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005304{
5305 char ralta_pl[MLXSW_REG_RALTA_LEN];
5306 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005307 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005308
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005309 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005310 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5311 if (err)
5312 return err;
5313
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005314 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005315 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5316 if (err)
5317 return err;
5318
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005319 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005320 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005321 char raltb_pl[MLXSW_REG_RALTB_LEN];
5322 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005323
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005324 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005325 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5326 raltb_pl);
5327 if (err)
5328 return err;
5329
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005330 mlxsw_reg_ralue_pack(ralue_pl, proto,
5331 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005332 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5333 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5334 ralue_pl);
5335 if (err)
5336 return err;
5337 }
5338
5339 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005340}
5341
Yotam Gigid42b0962017-09-27 08:23:20 +02005342static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5343 struct mfc_entry_notifier_info *men_info,
5344 bool replace)
5345{
5346 struct mlxsw_sp_vr *vr;
5347
5348 if (mlxsw_sp->router->aborted)
5349 return 0;
5350
David Ahernf8fa9b42017-10-18 09:56:56 -07005351 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005352 if (IS_ERR(vr))
5353 return PTR_ERR(vr);
5354
5355 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5356}
5357
5358static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5359 struct mfc_entry_notifier_info *men_info)
5360{
5361 struct mlxsw_sp_vr *vr;
5362
5363 if (mlxsw_sp->router->aborted)
5364 return;
5365
5366 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5367 if (WARN_ON(!vr))
5368 return;
5369
5370 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5371 mlxsw_sp_vr_put(vr);
5372}
5373
5374static int
5375mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5376 struct vif_entry_notifier_info *ven_info)
5377{
5378 struct mlxsw_sp_rif *rif;
5379 struct mlxsw_sp_vr *vr;
5380
5381 if (mlxsw_sp->router->aborted)
5382 return 0;
5383
David Ahernf8fa9b42017-10-18 09:56:56 -07005384 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005385 if (IS_ERR(vr))
5386 return PTR_ERR(vr);
5387
5388 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5389 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5390 ven_info->vif_index,
5391 ven_info->vif_flags, rif);
5392}
5393
5394static void
5395mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5396 struct vif_entry_notifier_info *ven_info)
5397{
5398 struct mlxsw_sp_vr *vr;
5399
5400 if (mlxsw_sp->router->aborted)
5401 return;
5402
5403 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5404 if (WARN_ON(!vr))
5405 return;
5406
5407 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5408 mlxsw_sp_vr_put(vr);
5409}
5410
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005411static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5412{
5413 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5414 int err;
5415
5416 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5417 MLXSW_SP_LPM_TREE_MIN);
5418 if (err)
5419 return err;
5420
Yotam Gigid42b0962017-09-27 08:23:20 +02005421	/* The multicast router code does not need an abort trap since, by
 5422	 * default, packets that do not match any route are trapped to the CPU.
5423 */
5424
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005425 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5426 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5427 MLXSW_SP_LPM_TREE_MIN + 1);
5428}
5429
Ido Schimmel9aecce12017-02-09 10:28:42 +01005430static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5431 struct mlxsw_sp_fib_node *fib_node)
5432{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005433 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005434
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005435 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5436 common.list) {
5437 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005438
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005439 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5440 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005441 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005442 /* Break when entry list is empty and node was freed.
5443 * Otherwise, we'll access freed memory in the next
5444 * iteration.
5445 */
5446 if (do_break)
5447 break;
5448 }
5449}
5450
Ido Schimmel428b8512017-08-03 13:28:28 +02005451static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5452 struct mlxsw_sp_fib_node *fib_node)
5453{
5454 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5455
5456 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5457 common.list) {
5458 bool do_break = &tmp->common.list == &fib_node->entry_list;
5459
5460 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5461 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5462 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5463 if (do_break)
5464 break;
5465 }
5466}
5467
Ido Schimmel9aecce12017-02-09 10:28:42 +01005468static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5469 struct mlxsw_sp_fib_node *fib_node)
5470{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005471 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005472 case MLXSW_SP_L3_PROTO_IPV4:
5473 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5474 break;
5475 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005476 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005477 break;
5478 }
5479}
5480
Ido Schimmel76610eb2017-03-10 08:53:41 +01005481static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5482 struct mlxsw_sp_vr *vr,
5483 enum mlxsw_sp_l3proto proto)
5484{
5485 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5486 struct mlxsw_sp_fib_node *fib_node, *tmp;
5487
5488 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5489 bool do_break = &tmp->list == &fib->node_list;
5490
5491 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5492 if (do_break)
5493 break;
5494 }
5495}
5496
Ido Schimmelac571de2016-11-14 11:26:32 +01005497static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005498{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005499 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005500
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005501 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005502 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005503
Ido Schimmel76610eb2017-03-10 08:53:41 +01005504 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005505 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005506
5507 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005508 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005509
5510	/* If the virtual router was only used for IPv4, then it is no
 5511	 * longer in use.
5512 */
5513 if (!mlxsw_sp_vr_is_used(vr))
5514 continue;
5515 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005516 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005517}
5518
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005519static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005520{
5521 int err;
5522
Ido Schimmel9011b672017-05-16 19:38:25 +02005523 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005524 return;
5525 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005526 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005527 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005528 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5529 if (err)
5530 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5531}
5532
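/* FIB notifications arrive in an atomic (RCU read-side) context, so the
 * notifier info is copied into this work item and processed later in
 * process context, under RTNL.
 */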
Ido Schimmel30572242016-12-03 16:45:01 +01005533struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005534 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005535 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005536 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005537 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005538 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005539 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005540 struct mfc_entry_notifier_info men_info;
5541 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005542 };
Ido Schimmel30572242016-12-03 16:45:01 +01005543 struct mlxsw_sp *mlxsw_sp;
5544 unsigned long event;
5545};
5546
Ido Schimmel66a57632017-08-03 13:28:26 +02005547static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005548{
Ido Schimmel30572242016-12-03 16:45:01 +01005549 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005550 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005551 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005552 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005553 int err;
5554
Ido Schimmel30572242016-12-03 16:45:01 +01005555 /* Protect internal structures from changes */
5556 rtnl_lock();
5557 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005558 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005559 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005560 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005561 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005562 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5563 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005564 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005565 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005566 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005567 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005568 break;
5569 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005570 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5571 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005572 break;
David Ahern1f279232017-10-27 17:37:14 -07005573 case FIB_EVENT_RULE_ADD:
5574		/* If we get here, a rule was added that we do not support.
5575		 * Just abort FIB offloading.
5576		 */
5577 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005578 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005579 case FIB_EVENT_NH_ADD: /* fall through */
5580 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005581 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5582 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005583 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5584 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005585 }
Ido Schimmel30572242016-12-03 16:45:01 +01005586 rtnl_unlock();
5587 kfree(fib_work);
5588}
5589
Ido Schimmel66a57632017-08-03 13:28:26 +02005590static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5591{
Ido Schimmel583419f2017-08-03 13:28:27 +02005592 struct mlxsw_sp_fib_event_work *fib_work =
5593 container_of(work, struct mlxsw_sp_fib_event_work, work);
5594 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005595 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005596 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005597
5598 rtnl_lock();
5599 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005600 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005601 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005602 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005603 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005604 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005605 if (err)
5606 mlxsw_sp_router_fib_abort(mlxsw_sp);
5607 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5608 break;
5609 case FIB_EVENT_ENTRY_DEL:
5610 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5611 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5612 break;
David Ahern1f279232017-10-27 17:37:14 -07005613 case FIB_EVENT_RULE_ADD:
5614		/* If we get here, a rule was added that we do not support.
5615		 * Just abort FIB offloading.
5616		 */
5617 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005618 break;
5619 }
5620 rtnl_unlock();
5621 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005622}
5623
Yotam Gigid42b0962017-09-27 08:23:20 +02005624static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5625{
5626 struct mlxsw_sp_fib_event_work *fib_work =
5627 container_of(work, struct mlxsw_sp_fib_event_work, work);
5628 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005629 bool replace;
5630 int err;
5631
5632 rtnl_lock();
5633 switch (fib_work->event) {
5634 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5635 case FIB_EVENT_ENTRY_ADD:
5636 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5637
5638 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5639 replace);
5640 if (err)
5641 mlxsw_sp_router_fib_abort(mlxsw_sp);
5642 ipmr_cache_put(fib_work->men_info.mfc);
5643 break;
5644 case FIB_EVENT_ENTRY_DEL:
5645 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5646 ipmr_cache_put(fib_work->men_info.mfc);
5647 break;
5648 case FIB_EVENT_VIF_ADD:
5649 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5650 &fib_work->ven_info);
5651 if (err)
5652 mlxsw_sp_router_fib_abort(mlxsw_sp);
5653 dev_put(fib_work->ven_info.dev);
5654 break;
5655 case FIB_EVENT_VIF_DEL:
5656 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5657 &fib_work->ven_info);
5658 dev_put(fib_work->ven_info.dev);
5659 break;
David Ahern1f279232017-10-27 17:37:14 -07005660 case FIB_EVENT_RULE_ADD:
5661		/* If we get here, a rule was added that we do not support.
5662		 * Just abort FIB offloading.
5663		 */
5664 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005665 break;
5666 }
5667 rtnl_unlock();
5668 kfree(fib_work);
5669}
5670
Ido Schimmel66a57632017-08-03 13:28:26 +02005671static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5672 struct fib_notifier_info *info)
5673{
David Ahern3c75f9b2017-10-18 15:01:38 -07005674 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005675 struct fib_nh_notifier_info *fnh_info;
5676
Ido Schimmel66a57632017-08-03 13:28:26 +02005677 switch (fib_work->event) {
5678 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5679 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5680 case FIB_EVENT_ENTRY_ADD: /* fall through */
5681 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005682 fen_info = container_of(info, struct fib_entry_notifier_info,
5683 info);
5684 fib_work->fen_info = *fen_info;
5685 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005686 * freed while work is queued. Release it afterwards.
5687 */
5688 fib_info_hold(fib_work->fen_info.fi);
5689 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005690 case FIB_EVENT_NH_ADD: /* fall through */
5691 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005692 fnh_info = container_of(info, struct fib_nh_notifier_info,
5693 info);
5694 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005695 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5696 break;
5697 }
5698}
5699
5700static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5701 struct fib_notifier_info *info)
5702{
David Ahern3c75f9b2017-10-18 15:01:38 -07005703 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005704
Ido Schimmel583419f2017-08-03 13:28:27 +02005705 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005706 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005707 case FIB_EVENT_ENTRY_ADD: /* fall through */
5708 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005709 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5710 info);
5711 fib_work->fen6_info = *fen6_info;
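		/* As with IPv4, hold a reference on the route so it is not
		 * freed before the work item has run.
		 */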
Ido Schimmel428b8512017-08-03 13:28:28 +02005712 rt6_hold(fib_work->fen6_info.rt);
5713 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005714 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005715}
5716
Yotam Gigid42b0962017-09-27 08:23:20 +02005717static void
5718mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5719 struct fib_notifier_info *info)
5720{
5721 switch (fib_work->event) {
5722 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5723 case FIB_EVENT_ENTRY_ADD: /* fall through */
5724 case FIB_EVENT_ENTRY_DEL:
5725 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5726 ipmr_cache_hold(fib_work->men_info.mfc);
5727 break;
5728 case FIB_EVENT_VIF_ADD: /* fall through */
5729 case FIB_EVENT_VIF_DEL:
5730 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5731 dev_hold(fib_work->ven_info.dev);
5732 break;
David Ahern1f279232017-10-27 17:37:14 -07005733 }
5734}
5735
5736static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5737 struct fib_notifier_info *info,
5738 struct mlxsw_sp *mlxsw_sp)
5739{
5740 struct netlink_ext_ack *extack = info->extack;
5741 struct fib_rule_notifier_info *fr_info;
5742 struct fib_rule *rule;
5743 int err = 0;
5744
5745	/* Rule deletions require no action at the moment */
5746 if (event == FIB_EVENT_RULE_DEL)
5747 return 0;
5748
5749 if (mlxsw_sp->router->aborted)
5750 return 0;
5751
5752 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5753 rule = fr_info->rule;
5754
5755 switch (info->family) {
5756 case AF_INET:
5757 if (!fib4_rule_default(rule) && !rule->l3mdev)
5758 err = -1;
5759 break;
5760 case AF_INET6:
5761 if (!fib6_rule_default(rule) && !rule->l3mdev)
5762 err = -1;
5763 break;
5764 case RTNL_FAMILY_IPMR:
5765 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5766 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005767 break;
5768 }
David Ahern1f279232017-10-27 17:37:14 -07005769
5770 if (err < 0)
5771 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5772
5773 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005774}
5775
Ido Schimmel30572242016-12-03 16:45:01 +01005776/* Called with rcu_read_lock() */
5777static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5778 unsigned long event, void *ptr)
5779{
Ido Schimmel30572242016-12-03 16:45:01 +01005780 struct mlxsw_sp_fib_event_work *fib_work;
5781 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005782 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005783 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005784
Ido Schimmel8e29f972017-09-15 15:31:07 +02005785 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005786 (info->family != AF_INET && info->family != AF_INET6 &&
5787 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005788 return NOTIFY_DONE;
5789
David Ahern1f279232017-10-27 17:37:14 -07005790 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5791
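	/* Rule events are checked synchronously; only unsupported rule
	 * additions fall through and are queued to the per-family work item,
	 * which then aborts FIB offloading.
	 */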
5792 switch (event) {
5793 case FIB_EVENT_RULE_ADD: /* fall through */
5794 case FIB_EVENT_RULE_DEL:
5795 err = mlxsw_sp_router_fib_rule_event(event, info,
5796 router->mlxsw_sp);
5797 if (!err)
5798 return NOTIFY_DONE;
5799 }
5800
Ido Schimmel30572242016-12-03 16:45:01 +01005801 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5802 if (WARN_ON(!fib_work))
5803 return NOTIFY_BAD;
5804
Ido Schimmel7e39d112017-05-16 19:38:28 +02005805 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005806 fib_work->event = event;
5807
Ido Schimmel66a57632017-08-03 13:28:26 +02005808 switch (info->family) {
5809 case AF_INET:
5810 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5811 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005812 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005813 case AF_INET6:
5814 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5815 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005816 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005817 case RTNL_FAMILY_IPMR:
5818 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5819 mlxsw_sp_router_fibmr_event(fib_work, info);
5820 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005821 }
5822
Ido Schimmela0e47612017-02-06 16:20:10 +01005823 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005824
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005825 return NOTIFY_DONE;
5826}
5827
Ido Schimmel4724ba562017-03-10 08:53:39 +01005828static struct mlxsw_sp_rif *
5829mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5830 const struct net_device *dev)
5831{
5832 int i;
5833
5834 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005835 if (mlxsw_sp->router->rifs[i] &&
5836 mlxsw_sp->router->rifs[i]->dev == dev)
5837 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005838
5839 return NULL;
5840}
5841
5842static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5843{
5844 char ritr_pl[MLXSW_REG_RITR_LEN];
5845 int err;
5846
5847 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5848 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5849 if (WARN_ON_ONCE(err))
5850 return err;
5851
5852 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5853 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5854}
5855
5856static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005857 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005858{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005859 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5860 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5861 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005862}
5863
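/* Decide whether an address event should result in RIF configuration: on
 * NETDEV_UP a RIF is needed only if the netdev does not already have one;
 * on NETDEV_DOWN the RIF should be removed only once the last IPv4/IPv6
 * address is gone and the netdev is not an L3 slave.
 */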
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005864static bool
5865mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5866 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005867{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005868 struct inet6_dev *inet6_dev;
5869 bool addr_list_empty = true;
5870 struct in_device *idev;
5871
Ido Schimmel4724ba562017-03-10 08:53:39 +01005872 switch (event) {
5873 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005874 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005875 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005876 idev = __in_dev_get_rtnl(dev);
5877 if (idev && idev->ifa_list)
5878 addr_list_empty = false;
5879
5880 inet6_dev = __in6_dev_get(dev);
5881 if (addr_list_empty && inet6_dev &&
5882 !list_empty(&inet6_dev->addr_list))
5883 addr_list_empty = false;
5884
5885 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005886 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005887 return true;
5888 /* It is possible we already removed the RIF ourselves
5889 * if it was assigned to a netdev that is now a bridge
5890 * or LAG slave.
5891 */
5892 return false;
5893 }
5894
5895 return false;
5896}
5897
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005898static enum mlxsw_sp_rif_type
5899mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5900 const struct net_device *dev)
5901{
5902 enum mlxsw_sp_fid_type type;
5903
Petr Machata6ddb7422017-09-02 23:49:19 +02005904 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5905 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5906
5907 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005908 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5909 type = MLXSW_SP_FID_TYPE_8021Q;
5910 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5911 type = MLXSW_SP_FID_TYPE_8021Q;
5912 else if (netif_is_bridge_master(dev))
5913 type = MLXSW_SP_FID_TYPE_8021D;
5914 else
5915 type = MLXSW_SP_FID_TYPE_RFID;
5916
5917 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5918}
5919
Ido Schimmelde5ed992017-06-04 16:53:40 +02005920static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005921{
5922 int i;
5923
Ido Schimmelde5ed992017-06-04 16:53:40 +02005924 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5925 if (!mlxsw_sp->router->rifs[i]) {
5926 *p_rif_index = i;
5927 return 0;
5928 }
5929 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005930
Ido Schimmelde5ed992017-06-04 16:53:40 +02005931 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005932}
5933
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005934static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5935 u16 vr_id,
5936 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005937{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005938 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005939
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005940 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005941 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005942 return NULL;
5943
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005944 INIT_LIST_HEAD(&rif->nexthop_list);
5945 INIT_LIST_HEAD(&rif->neigh_list);
5946 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5947 rif->mtu = l3_dev->mtu;
5948 rif->vr_id = vr_id;
5949 rif->dev = l3_dev;
5950 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005951
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005952 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005953}
5954
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005955struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5956 u16 rif_index)
5957{
5958 return mlxsw_sp->router->rifs[rif_index];
5959}
5960
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005961u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5962{
5963 return rif->rif_index;
5964}
5965
Petr Machata92107cf2017-09-02 23:49:28 +02005966u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5967{
5968 return lb_rif->common.rif_index;
5969}
5970
5971u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5972{
5973 return lb_rif->ul_vr_id;
5974}
5975
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005976int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5977{
5978 return rif->dev->ifindex;
5979}
5980
Yotam Gigi91e4d592017-09-19 10:00:19 +02005981const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5982{
5983 return rif->dev;
5984}
5985
Ido Schimmel4724ba562017-03-10 08:53:39 +01005986static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005987mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005988 const struct mlxsw_sp_rif_params *params,
5989 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005990{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005991 u32 tb_id = l3mdev_fib_table(params->dev);
5992 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005993 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005994 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005995 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005996 struct mlxsw_sp_vr *vr;
5997 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005998 int err;
5999
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006000 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6001 ops = mlxsw_sp->router->rif_ops_arr[type];
6002
David Ahernf8fa9b42017-10-18 09:56:56 -07006003 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006004 if (IS_ERR(vr))
6005 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02006006 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006007
Ido Schimmelde5ed992017-06-04 16:53:40 +02006008 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07006009 if (err) {
6010 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006011 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006012 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006013
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006014 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006015 if (!rif) {
6016 err = -ENOMEM;
6017 goto err_rif_alloc;
6018 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006019 rif->mlxsw_sp = mlxsw_sp;
6020 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006021
Petr Machata010cadf2017-09-02 23:49:18 +02006022 if (ops->fid_get) {
6023 fid = ops->fid_get(rif);
6024 if (IS_ERR(fid)) {
6025 err = PTR_ERR(fid);
6026 goto err_fid_get;
6027 }
6028 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006029 }
6030
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006031 if (ops->setup)
6032 ops->setup(rif, params);
6033
6034 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006035 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006036 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006037
Yotam Gigid42b0962017-09-27 08:23:20 +02006038 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
6039 if (err)
6040 goto err_mr_rif_add;
6041
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006042 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006043 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006044
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006045 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006046
Yotam Gigid42b0962017-09-27 08:23:20 +02006047err_mr_rif_add:
6048 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006049err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006050 if (fid)
6051 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006052err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006053 kfree(rif);
6054err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006055err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006056 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006057 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006058 return ERR_PTR(err);
6059}
6060
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006061void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006062{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006063 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6064 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006065 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006066 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006067
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006068 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006069 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006070
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006071 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006072 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006073 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006074 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006075 if (fid)
6076 /* Loopback RIFs are not associated with a FID. */
6077 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006078 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006079 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006080 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006081}
6082
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006083static void
6084mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6085 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6086{
6087 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6088
6089 params->vid = mlxsw_sp_port_vlan->vid;
6090 params->lag = mlxsw_sp_port->lagged;
6091 if (params->lag)
6092 params->lag_id = mlxsw_sp_port->lag_id;
6093 else
6094 params->system_port = mlxsw_sp_port->local_port;
6095}
6096
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006097static int
Ido Schimmela1107482017-05-26 08:37:39 +02006098mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006099 struct net_device *l3_dev,
6100 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006101{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006102 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006103 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006104 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006105 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006106 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006107 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006108
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006109 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006110 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006111 struct mlxsw_sp_rif_params params = {
6112 .dev = l3_dev,
6113 };
6114
6115 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006116 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006117 if (IS_ERR(rif))
6118 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006119 }
6120
Ido Schimmela1107482017-05-26 08:37:39 +02006121	/* The FID was already created; just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006122 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006123 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6124 if (err)
6125 goto err_fid_port_vid_map;
6126
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006127 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006128 if (err)
6129 goto err_port_vid_learning_set;
6130
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006131 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006132 BR_STATE_FORWARDING);
6133 if (err)
6134 goto err_port_vid_stp_set;
6135
Ido Schimmela1107482017-05-26 08:37:39 +02006136 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006137
Ido Schimmel4724ba562017-03-10 08:53:39 +01006138 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006139
6140err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006141 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006142err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006143 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6144err_fid_port_vid_map:
6145 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006146 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006147}
6148
Ido Schimmela1107482017-05-26 08:37:39 +02006149void
6150mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006151{
Ido Schimmelce95e152017-05-26 08:37:27 +02006152 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006153 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006154 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006155
Ido Schimmela1107482017-05-26 08:37:39 +02006156 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6157 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006158
Ido Schimmela1107482017-05-26 08:37:39 +02006159 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006160 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6161 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006162 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6163 /* If router port holds the last reference on the rFID, then the
6164 * associated Sub-port RIF will be destroyed.
6165 */
6166 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006167}
6168
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006169static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6170 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006171 unsigned long event, u16 vid,
6172 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006173{
6174 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006175 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006176
Ido Schimmelce95e152017-05-26 08:37:27 +02006177 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006178 if (WARN_ON(!mlxsw_sp_port_vlan))
6179 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006180
6181 switch (event) {
6182 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006183 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006184 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006185 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006186 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006187 break;
6188 }
6189
6190 return 0;
6191}
6192
6193static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006194 unsigned long event,
6195 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006196{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006197 if (netif_is_bridge_port(port_dev) ||
6198 netif_is_lag_port(port_dev) ||
6199 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006200 return 0;
6201
David Ahernf8fa9b42017-10-18 09:56:56 -07006202 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6203 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006204}
6205
6206static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6207 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006208 unsigned long event, u16 vid,
6209 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006210{
6211 struct net_device *port_dev;
6212 struct list_head *iter;
6213 int err;
6214
6215 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6216 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006217 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6218 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006219 event, vid,
6220 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006221 if (err)
6222 return err;
6223 }
6224 }
6225
6226 return 0;
6227}
6228
6229static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006230 unsigned long event,
6231 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006232{
6233 if (netif_is_bridge_port(lag_dev))
6234 return 0;
6235
David Ahernf8fa9b42017-10-18 09:56:56 -07006236 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6237 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006238}
6239
Ido Schimmel4724ba562017-03-10 08:53:39 +01006240static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006241 unsigned long event,
6242 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006243{
6244 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006245 struct mlxsw_sp_rif_params params = {
6246 .dev = l3_dev,
6247 };
Ido Schimmela1107482017-05-26 08:37:39 +02006248 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006249
6250 switch (event) {
6251 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006252 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006253 if (IS_ERR(rif))
6254 return PTR_ERR(rif);
6255 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006256 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006257 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006258 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006259 break;
6260 }
6261
6262 return 0;
6263}
6264
6265static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006266 unsigned long event,
6267 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006268{
6269 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006270 u16 vid = vlan_dev_vlan_id(vlan_dev);
6271
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006272 if (netif_is_bridge_port(vlan_dev))
6273 return 0;
6274
Ido Schimmel4724ba562017-03-10 08:53:39 +01006275 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006276 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006277 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006278 else if (netif_is_lag_master(real_dev))
6279 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006280 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006281 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006282 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006283
6284 return 0;
6285}
6286
Ido Schimmelb1e45522017-04-30 19:47:14 +03006287static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006288 unsigned long event,
6289 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006290{
6291 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006292 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006293 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006294 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006295 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006296 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006297 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006298 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006299 else
6300 return 0;
6301}
6302
Ido Schimmel4724ba562017-03-10 08:53:39 +01006303int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6304 unsigned long event, void *ptr)
6305{
6306 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6307 struct net_device *dev = ifa->ifa_dev->dev;
6308 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006309 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006310 int err = 0;
6311
David Ahern89d5dd22017-10-18 09:56:55 -07006312 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6313 if (event == NETDEV_UP)
6314 goto out;
6315
6316 mlxsw_sp = mlxsw_sp_lower_get(dev);
6317 if (!mlxsw_sp)
6318 goto out;
6319
6320 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6321 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6322 goto out;
6323
David Ahernf8fa9b42017-10-18 09:56:56 -07006324 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006325out:
6326 return notifier_from_errno(err);
6327}
6328
6329int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6330 unsigned long event, void *ptr)
6331{
6332 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6333 struct net_device *dev = ivi->ivi_dev->dev;
6334 struct mlxsw_sp *mlxsw_sp;
6335 struct mlxsw_sp_rif *rif;
6336 int err = 0;
6337
Ido Schimmel4724ba562017-03-10 08:53:39 +01006338 mlxsw_sp = mlxsw_sp_lower_get(dev);
6339 if (!mlxsw_sp)
6340 goto out;
6341
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006342 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006343 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006344 goto out;
6345
David Ahernf8fa9b42017-10-18 09:56:56 -07006346 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006347out:
6348 return notifier_from_errno(err);
6349}
6350
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006351struct mlxsw_sp_inet6addr_event_work {
6352 struct work_struct work;
6353 struct net_device *dev;
6354 unsigned long event;
6355};
6356
6357static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6358{
6359 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6360 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6361 struct net_device *dev = inet6addr_work->dev;
6362 unsigned long event = inet6addr_work->event;
6363 struct mlxsw_sp *mlxsw_sp;
6364 struct mlxsw_sp_rif *rif;
6365
6366 rtnl_lock();
6367 mlxsw_sp = mlxsw_sp_lower_get(dev);
6368 if (!mlxsw_sp)
6369 goto out;
6370
6371 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6372 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6373 goto out;
6374
David Ahernf8fa9b42017-10-18 09:56:56 -07006375 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006376out:
6377 rtnl_unlock();
6378 dev_put(dev);
6379 kfree(inet6addr_work);
6380}
6381
6382/* Called with rcu_read_lock() */
6383int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6384 unsigned long event, void *ptr)
6385{
6386 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6387 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6388 struct net_device *dev = if6->idev->dev;
6389
David Ahern89d5dd22017-10-18 09:56:55 -07006390 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6391 if (event == NETDEV_UP)
6392 return NOTIFY_DONE;
6393
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006394 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6395 return NOTIFY_DONE;
6396
6397 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6398 if (!inet6addr_work)
6399 return NOTIFY_BAD;
6400
6401 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6402 inet6addr_work->dev = dev;
6403 inet6addr_work->event = event;
6404 dev_hold(dev);
6405 mlxsw_core_schedule_work(&inet6addr_work->work);
6406
6407 return NOTIFY_DONE;
6408}
6409
David Ahern89d5dd22017-10-18 09:56:55 -07006410int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6411 unsigned long event, void *ptr)
6412{
6413 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6414 struct net_device *dev = i6vi->i6vi_dev->dev;
6415 struct mlxsw_sp *mlxsw_sp;
6416 struct mlxsw_sp_rif *rif;
6417 int err = 0;
6418
6419 mlxsw_sp = mlxsw_sp_lower_get(dev);
6420 if (!mlxsw_sp)
6421 goto out;
6422
6423 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6424 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6425 goto out;
6426
David Ahernf8fa9b42017-10-18 09:56:56 -07006427 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006428out:
6429 return notifier_from_errno(err);
6430}
6431
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006432static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006433 const char *mac, int mtu)
6434{
6435 char ritr_pl[MLXSW_REG_RITR_LEN];
6436 int err;
6437
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006438 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006439 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6440 if (err)
6441 return err;
6442
6443 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6444 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6445 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6446 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6447}
6448
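/* Re-program a netdev's RIF after its MAC address or MTU changed: remove
 * the old FDB entry, edit the RIF with the new values, install a new FDB
 * entry and update the MTU of the multicast routing table.
 */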
6449int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6450{
6451 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006452 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006453 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006454 int err;
6455
6456 mlxsw_sp = mlxsw_sp_lower_get(dev);
6457 if (!mlxsw_sp)
6458 return 0;
6459
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006460 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6461 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006462 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006463 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006464
Ido Schimmela1107482017-05-26 08:37:39 +02006465 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006466 if (err)
6467 return err;
6468
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006469 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6470 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006471 if (err)
6472 goto err_rif_edit;
6473
Ido Schimmela1107482017-05-26 08:37:39 +02006474 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006475 if (err)
6476 goto err_rif_fdb_op;
6477
Yotam Gigifd890fe2017-09-27 08:23:21 +02006478 if (rif->mtu != dev->mtu) {
6479 struct mlxsw_sp_vr *vr;
6480
6481		/* The RIF is relevant only to its mr_table instance: unlike in
6482		 * unicast routing, in multicast routing a RIF cannot be shared
6483		 * between several multicast routing tables.
6484		 */
6485 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6486 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6487 }
6488
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006489 ether_addr_copy(rif->addr, dev->dev_addr);
6490 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006491
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006492 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006493
6494 return 0;
6495
6496err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006497 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006498err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006499 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006500 return err;
6501}
6502
Ido Schimmelb1e45522017-04-30 19:47:14 +03006503static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006504 struct net_device *l3_dev,
6505 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006506{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006507 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006508
Ido Schimmelb1e45522017-04-30 19:47:14 +03006509 /* If netdev is already associated with a RIF, then we need to
6510 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006511 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006512 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6513 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006514 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006515
David Ahernf8fa9b42017-10-18 09:56:56 -07006516 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006517}
6518
Ido Schimmelb1e45522017-04-30 19:47:14 +03006519static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6520 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006521{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006522 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006523
Ido Schimmelb1e45522017-04-30 19:47:14 +03006524 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6525 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006526 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006527 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006528}
6529
Ido Schimmelb1e45522017-04-30 19:47:14 +03006530int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6531 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006532{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6534 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006535
Ido Schimmelb1e45522017-04-30 19:47:14 +03006536 if (!mlxsw_sp)
6537 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006538
Ido Schimmelb1e45522017-04-30 19:47:14 +03006539 switch (event) {
6540 case NETDEV_PRECHANGEUPPER:
6541 return 0;
6542 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006543 if (info->linking) {
6544 struct netlink_ext_ack *extack;
6545
6546 extack = netdev_notifier_info_to_extack(&info->info);
6547 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6548 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006549 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006550 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006551 break;
6552 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006553
Ido Schimmelb1e45522017-04-30 19:47:14 +03006554 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006555}
6556
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006557static struct mlxsw_sp_rif_subport *
6558mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006559{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006560 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006561}
6562
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006563static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6564 const struct mlxsw_sp_rif_params *params)
6565{
6566 struct mlxsw_sp_rif_subport *rif_subport;
6567
6568 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6569 rif_subport->vid = params->vid;
6570 rif_subport->lag = params->lag;
6571 if (params->lag)
6572 rif_subport->lag_id = params->lag_id;
6573 else
6574 rif_subport->system_port = params->system_port;
6575}
6576
6577static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6578{
6579 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6580 struct mlxsw_sp_rif_subport *rif_subport;
6581 char ritr_pl[MLXSW_REG_RITR_LEN];
6582
6583 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6584 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006585 rif->rif_index, rif->vr_id, rif->dev->mtu);
6586 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006587 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6588 rif_subport->lag ? rif_subport->lag_id :
6589 rif_subport->system_port,
6590 rif_subport->vid);
6591
6592 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6593}
6594
6595static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6596{
Petr Machata010cadf2017-09-02 23:49:18 +02006597 int err;
6598
6599 err = mlxsw_sp_rif_subport_op(rif, true);
6600 if (err)
6601 return err;
6602
6603 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6604 mlxsw_sp_fid_index(rif->fid), true);
6605 if (err)
6606 goto err_rif_fdb_op;
6607
6608 mlxsw_sp_fid_rif_set(rif->fid, rif);
6609 return 0;
6610
6611err_rif_fdb_op:
6612 mlxsw_sp_rif_subport_op(rif, false);
6613 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006614}
6615
6616static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6617{
Petr Machata010cadf2017-09-02 23:49:18 +02006618 struct mlxsw_sp_fid *fid = rif->fid;
6619
6620 mlxsw_sp_fid_rif_set(fid, NULL);
6621 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6622 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006623 mlxsw_sp_rif_subport_op(rif, false);
6624}
6625
6626static struct mlxsw_sp_fid *
6627mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6628{
6629 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6630}
6631
6632static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6633 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6634 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6635 .setup = mlxsw_sp_rif_subport_setup,
6636 .configure = mlxsw_sp_rif_subport_configure,
6637 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6638 .fid_get = mlxsw_sp_rif_subport_fid_get,
6639};
6640
6641static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6642 enum mlxsw_reg_ritr_if_type type,
6643 u16 vid_fid, bool enable)
6644{
6645 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6646 char ritr_pl[MLXSW_REG_RITR_LEN];
6647
6648 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006649 rif->dev->mtu);
6650 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006651 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6652
6653 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6654}
6655
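/* The "router port" is a virtual port one past the maximum number of
 * ports. It is used as the flood destination towards the router when
 * MC/BC flooding is set up for a RIF's FID.
 */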
Yotam Gigib35750f2017-10-09 11:15:33 +02006656u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006657{
6658 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6659}
6660
6661static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6662{
6663 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6664 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6665 int err;
6666
6667 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6668 if (err)
6669 return err;
6670
Ido Schimmel0d284812017-07-18 10:10:12 +02006671 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6672 mlxsw_sp_router_port(mlxsw_sp), true);
6673 if (err)
6674 goto err_fid_mc_flood_set;
6675
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006676 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6677 mlxsw_sp_router_port(mlxsw_sp), true);
6678 if (err)
6679 goto err_fid_bc_flood_set;
6680
Petr Machata010cadf2017-09-02 23:49:18 +02006681 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6682 mlxsw_sp_fid_index(rif->fid), true);
6683 if (err)
6684 goto err_rif_fdb_op;
6685
6686 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006687 return 0;
6688
Petr Machata010cadf2017-09-02 23:49:18 +02006689err_rif_fdb_op:
6690 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6691 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006692err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006693 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6694 mlxsw_sp_router_port(mlxsw_sp), false);
6695err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006696 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6697 return err;
6698}
6699
6700static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6701{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006702 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006703 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6704 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006705
Petr Machata010cadf2017-09-02 23:49:18 +02006706 mlxsw_sp_fid_rif_set(fid, NULL);
6707 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6708 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006709 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6710 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006711 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6712 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006713 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6714}
6715
6716static struct mlxsw_sp_fid *
6717mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6718{
6719 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6720
6721 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6722}
6723
6724static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6725 .type = MLXSW_SP_RIF_TYPE_VLAN,
6726 .rif_size = sizeof(struct mlxsw_sp_rif),
6727 .configure = mlxsw_sp_rif_vlan_configure,
6728 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6729 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6730};
6731
6732static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6733{
6734 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6735 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6736 int err;
6737
6738 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6739 true);
6740 if (err)
6741 return err;
6742
Ido Schimmel0d284812017-07-18 10:10:12 +02006743 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6744 mlxsw_sp_router_port(mlxsw_sp), true);
6745 if (err)
6746 goto err_fid_mc_flood_set;
6747
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006748 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6749 mlxsw_sp_router_port(mlxsw_sp), true);
6750 if (err)
6751 goto err_fid_bc_flood_set;
6752
Petr Machata010cadf2017-09-02 23:49:18 +02006753 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6754 mlxsw_sp_fid_index(rif->fid), true);
6755 if (err)
6756 goto err_rif_fdb_op;
6757
6758 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006759 return 0;
6760
Petr Machata010cadf2017-09-02 23:49:18 +02006761err_rif_fdb_op:
6762 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6763 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006764err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006765 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6766 mlxsw_sp_router_port(mlxsw_sp), false);
6767err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006768 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6769 return err;
6770}
6771
6772static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6773{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006774 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006775 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6776 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006777
Petr Machata010cadf2017-09-02 23:49:18 +02006778 mlxsw_sp_fid_rif_set(fid, NULL);
6779 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6780 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006781 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6782 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006783 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6784 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006785 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6786}
6787
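/* Unlike VLAN RIFs, FID RIFs use 802.1D FIDs keyed by the netdev's
 * ifindex.
 */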
6788static struct mlxsw_sp_fid *
6789mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6790{
6791 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6792}
6793
6794static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6795 .type = MLXSW_SP_RIF_TYPE_FID,
6796 .rif_size = sizeof(struct mlxsw_sp_rif),
6797 .configure = mlxsw_sp_rif_fid_configure,
6798 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6799 .fid_get = mlxsw_sp_rif_fid_fid_get,
6800};
6801
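/* Loopback RIFs represent the underlay side of IP-in-IP tunnels. The
 * tunnel's loopback configuration is copied into the RIF at setup time and
 * programmed to the device when the RIF is configured.
 */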
Petr Machata6ddb7422017-09-02 23:49:19 +02006802static struct mlxsw_sp_rif_ipip_lb *
6803mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6804{
6805 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6806}
6807
6808static void
6809mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6810 const struct mlxsw_sp_rif_params *params)
6811{
6812 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6813 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6814
6815 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6816 common);
6817 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6818 rif_lb->lb_config = params_lb->lb_config;
6819}
6820
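/* Program (enable == true) or unprogram the loopback RIF via the RITR
 * register. Only an IPv4 underlay is currently handled: the packed
 * parameters are the underlay VR, the tunnel's local address and the
 * output GRE key. An IPv6 underlay returns -EAFNOSUPPORT.
 */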
6821static int
6822mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6823 struct mlxsw_sp_vr *ul_vr, bool enable)
6824{
6825 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6826 struct mlxsw_sp_rif *rif = &lb_rif->common;
6827 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6828 char ritr_pl[MLXSW_REG_RITR_LEN];
6829 u32 saddr4;
6830
6831 switch (lb_cf.ul_protocol) {
6832 case MLXSW_SP_L3_PROTO_IPV4:
6833 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6834 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6835 rif->rif_index, rif->vr_id, rif->dev->mtu);
6836 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6837 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6838 ul_vr->id, saddr4, lb_cf.okey);
6839 break;
6840
6841 case MLXSW_SP_L3_PROTO_IPV6:
6842 return -EAFNOSUPPORT;
6843 }
6844
6845 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6846}
6847
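/* Configuring a loopback RIF looks up (or creates) the VR of the tunnel's
 * underlay table, programs the loopback through RITR and records the VR ID
 * so that deconfigure below can drop the reference again; the VR's RIF
 * count keeps it alive in the meantime.
 */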
6848static int
6849mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6850{
6851 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6852 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6853 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6854 struct mlxsw_sp_vr *ul_vr;
6855 int err;
6856
David Ahernf8fa9b42017-10-18 09:56:56 -07006857 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006858 if (IS_ERR(ul_vr))
6859 return PTR_ERR(ul_vr);
6860
6861 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6862 if (err)
6863 goto err_loopback_op;
6864
6865 lb_rif->ul_vr_id = ul_vr->id;
6866 ++ul_vr->rif_count;
6867 return 0;
6868
6869err_loopback_op:
6870 mlxsw_sp_vr_put(ul_vr);
6871 return err;
6872}
6873
6874static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6875{
6876 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6877 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6878 struct mlxsw_sp_vr *ul_vr;
6879
6880 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6881 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6882
6883 --ul_vr->rif_count;
6884 mlxsw_sp_vr_put(ul_vr);
6885}
6886
6887static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6888 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6889 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6890 .setup = mlxsw_sp_rif_ipip_lb_setup,
6891 .configure = mlxsw_sp_rif_ipip_lb_configure,
6892 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6893};
6894
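/* Per-type RIF operations. The RIF creation path earlier in this file is
 * expected to pick the ops according to the RIF type and dispatch through
 * this table, roughly along these lines (illustrative sketch only, not the
 * actual caller):
 *
 *	ops = mlxsw_sp->router->rif_ops_arr[type];
 *	rif = <allocate ops->rif_size bytes>;
 *	rif->fid = ops->fid_get(rif);
 *	if (ops->setup)
 *		ops->setup(rif, params);
 *	err = ops->configure(rif);
 */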
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006895static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6896 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6897 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6898 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006899 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006900};
6901
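/* The router keeps an array of RIF pointers indexed by RIF index and sized
 * by the device's MAX_RIFS resource; mlxsw_sp_rifs_fini() warns if any RIF
 * is still allocated when the array is freed.
 */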
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006902static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6903{
6904 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6905
6906 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6907 sizeof(struct mlxsw_sp_rif *),
6908 GFP_KERNEL);
6909 if (!mlxsw_sp->router->rifs)
6910 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006911
6912 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6913
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006914 return 0;
6915}
6916
6917static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6918{
6919 int i;
6920
6921 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6922 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6923
6924 kfree(mlxsw_sp->router->rifs);
6925}
6926
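/* TIGCR holds device-global IP-in-IP tunneling configuration. It is
 * programmed once, with fixed values, as part of IP-in-IP initialization
 * below.
 */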
Petr Machatadcbda282017-10-20 09:16:16 +02006927static int
6928mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6929{
6930 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6931
6932 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6933 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6934}
6935
Petr Machata38ebc0f2017-09-02 23:49:17 +02006936static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6937{
6938 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006939 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006940 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006941}
6942
6943static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6944{
Petr Machata1012b9a2017-09-02 23:49:23 +02006945 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006946}
6947
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006948static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6949{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006950 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006951
6952 /* Flush pending FIB notifications and then flush the device's
6953 * table before requesting another dump. The FIB notification
6954 * block is unregistered, so no need to take RTNL.
6955 */
6956 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006957 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6958 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006959}
6960
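/* ECMP hash configuration. The fields the device hashes on for multipath
 * routing are programmed through the RECR2 register; when the kernel is
 * built without CONFIG_IP_ROUTE_MULTIPATH there is nothing to set up and
 * the stub further below is used instead.
 */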
Ido Schimmelaf658b62017-11-02 17:14:09 +01006961#ifdef CONFIG_IP_ROUTE_MULTIPATH
6962static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6963{
6964 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6965}
6966
6967static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6968{
6969 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6970}
6971
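/* IPv4: source and destination addresses are always hashed; the protocol
 * and L4 ports are added only when the kernel's fib_multipath_hash_policy
 * sysctl (read from init_net) selects L4 hashing.
 */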
6972static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6973{
6974 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6975
6976 mlxsw_sp_mp_hash_header_set(recr2_pl,
6977 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6978 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6979 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6980 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6981 if (only_l3)
6982 return;
6983 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6984 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6985 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6986 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6987}
6988
6989static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6990{
6991 mlxsw_sp_mp_hash_header_set(recr2_pl,
6992 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6993 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6994 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6995 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6996 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6997 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6998}
6999
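/* The ECMP hash seed is drawn from get_random_bytes() once at init time,
 * so the field selection above is applied with a per-device, per-boot
 * seed.
 */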
7000static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7001{
7002 char recr2_pl[MLXSW_REG_RECR2_LEN];
7003 u32 seed;
7004
7005 get_random_bytes(&seed, sizeof(seed));
7006 mlxsw_reg_recr2_pack(recr2_pl, seed);
7007 mlxsw_sp_mp4_hash_init(recr2_pl);
7008 mlxsw_sp_mp6_hash_init(recr2_pl);
7009
7010 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
7011}
7012#else
7013static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
7014{
7015 return 0;
7016}
7017#endif
7018
Yuval Mintz48276a22018-01-14 12:33:14 +01007019static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
7020{
7021 char rdpm_pl[MLXSW_REG_RDPM_LEN];
7022 unsigned int i;
7023
7024 MLXSW_REG_ZERO(rdpm, rdpm_pl);
7025
7026 /* HW is determining switch priority based on DSCP-bits, but the
7027 * kernel is still doing that based on the ToS. Since there's a
7028 * mismatch in bits we need to make sure to translate the right
7029 * value ToS would observe, skipping the 2 least-significant ECN bits.
7030 */
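	/* For example, DSCP 46 (EF) occupies ToS bits 7:2, so entry 46 is
	 * packed with rt_tos2priority(46 << 2).
	 */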
7031 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
7032 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
7033
7034 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
7035}
7036
Ido Schimmel4724ba562017-03-10 08:53:39 +01007037static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7038{
7039 char rgcr_pl[MLXSW_REG_RGCR_LEN];
7040 u64 max_rifs;
7041 int err;
7042
7043 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
7044 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007045 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007046
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007047 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007048 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
Yuval Mintz48276a22018-01-14 12:33:14 +01007049 mlxsw_reg_rgcr_usp_set(rgcr_pl, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007050 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
7051 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007052 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007053 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01007054}
7055
7056static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7057{
7058 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01007059
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02007060 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007061 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01007062}
7063
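/* Router initialization: the base router (RGCR), RIFs and IP-in-IP state
 * are brought up before the nexthop, LPM, multicast and VR infrastructure,
 * and the netevent and FIB notifiers are registered last, once all of the
 * state they may dereference is in place. mlxsw_sp_router_fini() below
 * tears the same pieces down in reverse order.
 */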
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007064int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
7065{
Ido Schimmel9011b672017-05-16 19:38:25 +02007066 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007067 int err;
7068
Ido Schimmel9011b672017-05-16 19:38:25 +02007069 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
7070 if (!router)
7071 return -ENOMEM;
7072 mlxsw_sp->router = router;
7073 router->mlxsw_sp = mlxsw_sp;
7074
7075 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007076 err = __mlxsw_sp_router_init(mlxsw_sp);
7077 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02007078 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007079
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007080 err = mlxsw_sp_rifs_init(mlxsw_sp);
7081 if (err)
7082 goto err_rifs_init;
7083
Petr Machata38ebc0f2017-09-02 23:49:17 +02007084 err = mlxsw_sp_ipips_init(mlxsw_sp);
7085 if (err)
7086 goto err_ipips_init;
7087
Ido Schimmel9011b672017-05-16 19:38:25 +02007088 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007089 &mlxsw_sp_nexthop_ht_params);
7090 if (err)
7091 goto err_nexthop_ht_init;
7092
Ido Schimmel9011b672017-05-16 19:38:25 +02007093 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007094 &mlxsw_sp_nexthop_group_ht_params);
7095 if (err)
7096 goto err_nexthop_group_ht_init;
7097
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02007098 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007099 err = mlxsw_sp_lpm_init(mlxsw_sp);
7100 if (err)
7101 goto err_lpm_init;
7102
Yotam Gigid42b0962017-09-27 08:23:20 +02007103 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
7104 if (err)
7105 goto err_mr_init;
7106
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007107 err = mlxsw_sp_vrs_init(mlxsw_sp);
7108 if (err)
7109 goto err_vrs_init;
7110
Ido Schimmel8c9583a2016-10-27 15:12:57 +02007111 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007112 if (err)
7113 goto err_neigh_init;
7114
Ido Schimmel48fac882017-11-02 17:14:06 +01007115 mlxsw_sp->router->netevent_nb.notifier_call =
7116 mlxsw_sp_router_netevent_event;
7117 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7118 if (err)
7119 goto err_register_netevent_notifier;
7120
Ido Schimmelaf658b62017-11-02 17:14:09 +01007121 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
7122 if (err)
7123 goto err_mp_hash_init;
7124
Yuval Mintz48276a22018-01-14 12:33:14 +01007125 err = mlxsw_sp_dscp_init(mlxsw_sp);
7126 if (err)
7127 goto err_dscp_init;
7128
Ido Schimmel7e39d112017-05-16 19:38:28 +02007129 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
7130 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007131 mlxsw_sp_router_fib_dump_flush);
7132 if (err)
7133 goto err_register_fib_notifier;
7134
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007135 return 0;
7136
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007137err_register_fib_notifier:
Yuval Mintz48276a22018-01-14 12:33:14 +01007138err_dscp_init:
Ido Schimmelaf658b62017-11-02 17:14:09 +01007139err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01007140 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
7141err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007142 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007143err_neigh_init:
7144 mlxsw_sp_vrs_fini(mlxsw_sp);
7145err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02007146 mlxsw_sp_mr_fini(mlxsw_sp);
7147err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01007148 mlxsw_sp_lpm_fini(mlxsw_sp);
7149err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007150 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007151err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007152 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007153err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02007154 mlxsw_sp_ipips_fini(mlxsw_sp);
7155err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007156 mlxsw_sp_rifs_fini(mlxsw_sp);
7157err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007158 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007159err_router_init:
7160 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007161 return err;
7162}
7163
7164void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7165{
Ido Schimmel7e39d112017-05-16 19:38:28 +02007166 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01007167 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007168 mlxsw_sp_neigh_fini(mlxsw_sp);
7169 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02007170 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007171 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007172 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
7173 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02007174 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007175 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007176 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007177 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007178}