/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

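/* Per-ASIC router state: the RIF and virtual router arrays, neighbour and
 * nexthop hash tables, LPM tree bookkeeping, the periodic neighbour and
 * nexthop update works, the list of offloaded IP-in-IP tunnels, and the
 * FIB and netevent notifier blocks.
 */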
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

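/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the requested direction via RITR. The counter is only marked
 * valid once all three steps have succeeded.
 */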
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

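/* LPM trees are shared: reuse an existing tree whose protocol and prefix
 * usage match the request, otherwise create a new one. The tree's
 * ref_count is managed separately via mlxsw_sp_lpm_tree_hold() and
 * mlxsw_sp_lpm_tree_put().
 */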
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

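/* A virtual router mirrors one kernel FIB table: it carries an IPv4 FIB, an
 * IPv6 FIB and an IPv4 multicast routing table, which are created together
 * and torn down together with the VR.
 */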
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

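/* Rebind every virtual router that is currently bound to the old LPM tree to
 * the new one, rolling the already-switched routers back if any of the
 * rebinds fails.
 */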
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

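/* Demotion turns the tunnel's IPIP_DECAP route back into a plain TRAP route,
 * so matching packets are delivered to the CPU instead of being decapsulated
 * in hardware.
 */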
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

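/* When an offloadable tunnel netdevice registers, create an IPIP entry for
 * it, unless another offloaded tunnel with the same local address already
 * exists in the same underlay table; in that case the existing tunnel is
 * demoted and neither tunnel is offloaded.
 */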
Petr Machata796ec772017-11-03 10:03:29 +01001296static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1297 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001298{
Petr Machata00635872017-10-16 16:26:37 +02001299 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001300 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001301 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001302 union mlxsw_sp_l3addr saddr;
1303 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001304
1305 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001306 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001307 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1308 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1309 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1310 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1311 saddr, ul_tb_id,
1312 NULL)) {
1313 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1314 ol_dev);
1315 if (IS_ERR(ipip_entry))
1316 return PTR_ERR(ipip_entry);
1317 }
Petr Machata00635872017-10-16 16:26:37 +02001318 }
1319
1320 return 0;
1321}
1322
Petr Machata796ec772017-11-03 10:03:29 +01001323static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1324 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001325{
1326 struct mlxsw_sp_ipip_entry *ipip_entry;
1327
1328 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1329 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001330 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001331}
1332
Petr Machata47518ca2017-11-03 10:03:35 +01001333static void
1334mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1335 struct mlxsw_sp_ipip_entry *ipip_entry)
1336{
1337 struct mlxsw_sp_fib_entry *decap_fib_entry;
1338
1339 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1340 if (decap_fib_entry)
1341 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1342 decap_fib_entry);
1343}
1344
Petr Machata6d4de442017-11-03 10:03:34 +01001345static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1346 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001347{
Petr Machata00635872017-10-16 16:26:37 +02001348 struct mlxsw_sp_ipip_entry *ipip_entry;
1349
1350 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001351 if (ipip_entry)
1352 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001353}
1354
Petr Machataa3fe1982017-11-03 10:03:33 +01001355static void
1356mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1357 struct mlxsw_sp_ipip_entry *ipip_entry)
1358{
1359 if (ipip_entry->decap_fib_entry)
1360 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1361}
1362
Petr Machata796ec772017-11-03 10:03:29 +01001363static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1364 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001365{
1366 struct mlxsw_sp_ipip_entry *ipip_entry;
1367
1368 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001369 if (ipip_entry)
1370 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001371}
1372
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001373static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1374 struct mlxsw_sp_rif *rif);
Petr Machata65a61212017-11-03 10:03:37 +01001375static int
1376mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1377 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001378 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001379 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001380{
Petr Machata65a61212017-11-03 10:03:37 +01001381 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1382 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001383
Petr Machata65a61212017-11-03 10:03:37 +01001384 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1385 ipip_entry->ipipt,
1386 ipip_entry->ol_dev,
1387 extack);
1388 if (IS_ERR(new_lb_rif))
1389 return PTR_ERR(new_lb_rif);
1390 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001391
1392 if (keep_encap) {
1393 list_splice_init(&old_lb_rif->common.nexthop_list,
1394 &new_lb_rif->common.nexthop_list);
1395 mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
1396 }
1397
Petr Machata65a61212017-11-03 10:03:37 +01001398 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001399
Petr Machata65a61212017-11-03 10:03:37 +01001400 return 0;
1401}
1402
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001403/**
1404 * Update the offload related to an IPIP entry. This always updates decap, and
1405 * in addition to that it also:
1406 * @recreate_loopback: recreates the associated loopback RIF
1407 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1408 * relevant when recreate_loopback is true.
1409 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1410 * is only relevant when recreate_loopback is false.
1411 */
Petr Machata65a61212017-11-03 10:03:37 +01001412int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1413 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001414 bool recreate_loopback,
1415 bool keep_encap,
1416 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001417 struct netlink_ext_ack *extack)
1418{
1419 int err;
1420
1421 /* RIFs can't be edited, so to update loopback, we need to destroy and
1422 * recreate it. That creates a window of opportunity where RALUE and
1423 * RATR registers end up referencing a RIF that's already gone. RATRs
1424 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001425 * of RALUE, demote the decap route back.
1426 */
1427 if (ipip_entry->decap_fib_entry)
1428 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1429
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001430 if (recreate_loopback) {
1431 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1432 keep_encap, extack);
1433 if (err)
1434 return err;
1435 } else if (update_nexthops) {
1436 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1437 &ipip_entry->ol_lb->common);
1438 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001439
Petr Machata65a61212017-11-03 10:03:37 +01001440 if (ipip_entry->ol_dev->flags & IFF_UP)
1441 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001442
1443 return 0;
1444}
1445
Petr Machata65a61212017-11-03 10:03:37 +01001446static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1447 struct net_device *ol_dev,
1448 struct netlink_ext_ack *extack)
1449{
1450 struct mlxsw_sp_ipip_entry *ipip_entry =
1451 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1452
1453 if (!ipip_entry)
1454 return 0;
1455 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001456 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001457}
1458
Petr Machata61481f22017-11-03 10:03:41 +01001459static int
1460mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1461 struct mlxsw_sp_ipip_entry *ipip_entry,
1462 struct net_device *ul_dev,
1463 struct netlink_ext_ack *extack)
1464{
1465 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1466 true, true, false, extack);
1467}
1468
Petr Machata4cf04f32017-11-03 10:03:42 +01001469static int
1470mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1471 struct net_device *ol_dev,
1472 struct netlink_ext_ack *extack)
1473{
1474 const struct mlxsw_sp_ipip_ops *ipip_ops;
1475 struct mlxsw_sp_ipip_entry *ipip_entry;
1476 int err;
1477
1478 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1479 if (!ipip_entry)
1480 /* A change might make a tunnel eligible for offloading, but
1481 * that is currently not implemented. What falls to slow path
1482 * stays there.
1483 */
1484 return 0;
1485
1486 /* A change might make a tunnel not eligible for offloading. */
1487 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1488 ipip_entry->ipipt)) {
1489 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1490 return 0;
1491 }
1492
1493 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1494 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1495 return err;
1496}
1497
Petr Machataaf641712017-11-03 10:03:40 +01001498void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1499 struct mlxsw_sp_ipip_entry *ipip_entry)
1500{
1501 struct net_device *ol_dev = ipip_entry->ol_dev;
1502
1503 if (ol_dev->flags & IFF_UP)
1504 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1505 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1506}
1507
1508/* The configuration where several tunnels have the same local address in the
1509 * same underlay table needs special treatment in the HW. That is currently not
1510 * implemented in the driver. This function finds and demotes the first tunnel
 1511 * with a given source address, except the one passed in via the argument
1512 * `except'.
1513 */
1514bool
1515mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1516 enum mlxsw_sp_l3proto ul_proto,
1517 union mlxsw_sp_l3addr saddr,
1518 u32 ul_tb_id,
1519 const struct mlxsw_sp_ipip_entry *except)
1520{
1521 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1522
1523 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1524 ipip_list_node) {
1525 if (ipip_entry != except &&
1526 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1527 ul_tb_id, ipip_entry)) {
1528 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1529 return true;
1530 }
1531 }
1532
1533 return false;
1534}
1535
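/* Demote all offloaded tunnels whose underlay device is ul_dev, so that their
 * traffic is handled in the slow path instead.
 */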
Petr Machata61481f22017-11-03 10:03:41 +01001536static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1537 struct net_device *ul_dev)
1538{
1539 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1540
1541 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1542 ipip_list_node) {
1543 struct net_device *ipip_ul_dev =
1544 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1545
1546 if (ipip_ul_dev == ul_dev)
1547 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1548 }
1549}
1550
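/* Handle netdevice events on the overlay device of an offloadable tunnel:
 * create the IPIP entry on registration, destroy it on unregistration, and
 * keep the decap route, loopback RIF and next hops in sync on up/down, VRF
 * enslavement and tunnel configuration changes.
 */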
Petr Machata7e75af62017-11-03 10:03:36 +01001551int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1552 struct net_device *ol_dev,
1553 unsigned long event,
1554 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001555{
Petr Machata7e75af62017-11-03 10:03:36 +01001556 struct netdev_notifier_changeupper_info *chup;
1557 struct netlink_ext_ack *extack;
1558
Petr Machata00635872017-10-16 16:26:37 +02001559 switch (event) {
1560 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001561 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001562 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001563 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001564 return 0;
1565 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001566 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1567 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001568 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001569 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001570 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001571 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001572 chup = container_of(info, typeof(*chup), info);
1573 extack = info->extack;
1574 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001575 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001576 ol_dev,
1577 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001578 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001579 case NETDEV_CHANGE:
1580 extack = info->extack;
1581 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1582 ol_dev, extack);
Petr Machata00635872017-10-16 16:26:37 +02001583 }
1584 return 0;
1585}
1586
Petr Machata61481f22017-11-03 10:03:41 +01001587static int
1588__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1589 struct mlxsw_sp_ipip_entry *ipip_entry,
1590 struct net_device *ul_dev,
1591 unsigned long event,
1592 struct netdev_notifier_info *info)
1593{
1594 struct netdev_notifier_changeupper_info *chup;
1595 struct netlink_ext_ack *extack;
1596
1597 switch (event) {
1598 case NETDEV_CHANGEUPPER:
1599 chup = container_of(info, typeof(*chup), info);
1600 extack = info->extack;
1601 if (netif_is_l3_master(chup->upper_dev))
1602 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1603 ipip_entry,
1604 ul_dev,
1605 extack);
1606 break;
1607 }
1608 return 0;
1609}
1610
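/* Dispatch an event on an underlay device to every IPIP entry whose tunnel
 * uses it. If handling the event fails for one of the entries, demote all
 * tunnels using this underlay device so that they fall back to the slow path.
 */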
1611int
1612mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1613 struct net_device *ul_dev,
1614 unsigned long event,
1615 struct netdev_notifier_info *info)
1616{
1617 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1618 int err;
1619
1620 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1621 ul_dev,
1622 ipip_entry))) {
1623 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1624 ul_dev, event, info);
1625 if (err) {
1626 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1627 ul_dev);
1628 return err;
1629 }
1630 }
1631
1632 return 0;
1633}
1634
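/* A neigh entry caches a kernel neighbour used by offloaded routes, so that
 * its state and MAC address can be reflected into the device host table.
 */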
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001635struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001636 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001637};
1638
1639struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001640 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001641 struct rhash_head ht_node;
1642 struct mlxsw_sp_neigh_key key;
1643 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001644 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001645 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001646 struct list_head nexthop_list; /* list of nexthops using
1647 * this neigh entry
1648 */
Yotam Gigib2157142016-07-05 11:27:51 +02001649 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001650 unsigned int counter_index;
1651 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001652};
1653
1654static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1655 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1656 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1657 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1658};
1659
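/* Walk the neighbour entries of a RIF: pass NULL to get the first entry, or a
 * previous entry to get the one following it; returns NULL past the last one.
 */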
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001660struct mlxsw_sp_neigh_entry *
1661mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1662 struct mlxsw_sp_neigh_entry *neigh_entry)
1663{
1664 if (!neigh_entry) {
1665 if (list_empty(&rif->neigh_list))
1666 return NULL;
1667 else
1668 return list_first_entry(&rif->neigh_list,
1669 typeof(*neigh_entry),
1670 rif_list_node);
1671 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001672 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001673 return NULL;
1674 return list_next_entry(neigh_entry, rif_list_node);
1675}
1676
1677int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1678{
1679 return neigh_entry->key.n->tbl->family;
1680}
1681
1682unsigned char *
1683mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1684{
1685 return neigh_entry->ha;
1686}
1687
1688u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1689{
1690 struct neighbour *n;
1691
1692 n = neigh_entry->key.n;
1693 return ntohl(*((__be32 *) n->primary_key));
1694}
1695
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001696struct in6_addr *
1697mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1698{
1699 struct neighbour *n;
1700
1701 n = neigh_entry->key.n;
1702 return (struct in6_addr *) &n->primary_key;
1703}
1704
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001705int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1706 struct mlxsw_sp_neigh_entry *neigh_entry,
1707 u64 *p_counter)
1708{
1709 if (!neigh_entry->counter_valid)
1710 return -EINVAL;
1711
1712 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1713 p_counter, NULL);
1714}
1715
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001716static struct mlxsw_sp_neigh_entry *
1717mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1718 u16 rif)
1719{
1720 struct mlxsw_sp_neigh_entry *neigh_entry;
1721
1722 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1723 if (!neigh_entry)
1724 return NULL;
1725
1726 neigh_entry->key.n = n;
1727 neigh_entry->rif = rif;
1728 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1729
1730 return neigh_entry;
1731}
1732
1733static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1734{
1735 kfree(neigh_entry);
1736}
1737
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001738static int
1739mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1740 struct mlxsw_sp_neigh_entry *neigh_entry)
1741{
Ido Schimmel9011b672017-05-16 19:38:25 +02001742 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001743 &neigh_entry->ht_node,
1744 mlxsw_sp_neigh_ht_params);
1745}
1746
1747static void
1748mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1749 struct mlxsw_sp_neigh_entry *neigh_entry)
1750{
Ido Schimmel9011b672017-05-16 19:38:25 +02001751 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001752 &neigh_entry->ht_node,
1753 mlxsw_sp_neigh_ht_params);
1754}
1755
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001756static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001757mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1758 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001759{
1760 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001761 const char *table_name;
1762
1763 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1764 case AF_INET:
1765 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1766 break;
1767 case AF_INET6:
1768 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1769 break;
1770 default:
1771 WARN_ON(1);
1772 return false;
1773 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001774
1775 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001776 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001777}
1778
1779static void
1780mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1781 struct mlxsw_sp_neigh_entry *neigh_entry)
1782{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001783 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001784 return;
1785
1786 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1787 return;
1788
1789 neigh_entry->counter_valid = true;
1790}
1791
1792static void
1793mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1794 struct mlxsw_sp_neigh_entry *neigh_entry)
1795{
1796 if (!neigh_entry->counter_valid)
1797 return;
1798 mlxsw_sp_flow_counter_free(mlxsw_sp,
1799 neigh_entry->counter_index);
1800 neigh_entry->counter_valid = false;
1801}
1802
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001803static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001804mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001805{
1806 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001807 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001808 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001809
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001810 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1811 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001812 return ERR_PTR(-EINVAL);
1813
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001814 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001815 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001816 return ERR_PTR(-ENOMEM);
1817
1818 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1819 if (err)
1820 goto err_neigh_entry_insert;
1821
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001822 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001823 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001824
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001825 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001826
1827err_neigh_entry_insert:
1828 mlxsw_sp_neigh_entry_free(neigh_entry);
1829 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001830}
1831
1832static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001833mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1834 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001835{
Ido Schimmel9665b742017-02-08 11:16:42 +01001836 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001837 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001838 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1839 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001840}
1841
1842static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001843mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001844{
Jiri Pirko33b13412016-11-10 12:31:04 +01001845 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001846
Jiri Pirko33b13412016-11-10 12:31:04 +01001847 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001848 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001849 &key, mlxsw_sp_neigh_ht_params);
1850}
1851
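/* Derive the neighbour activity polling interval from the kernel's
 * DELAY_PROBE_TIME; when IPv6 is enabled, the shorter of the ARP and ND
 * intervals is used.
 */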
Yotam Gigic723c7352016-07-05 11:27:43 +02001852static void
1853mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1854{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001855 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001856
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001857#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001858 interval = min_t(unsigned long,
1859 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1860 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001861#else
1862 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1863#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001864 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001865}
1866
1867static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1868 char *rauhtd_pl,
1869 int ent_index)
1870{
1871 struct net_device *dev;
1872 struct neighbour *n;
1873 __be32 dipn;
1874 u32 dip;
1875 u16 rif;
1876
1877 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1878
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001879 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001880 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1881 return;
1882 }
1883
1884 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001885 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001886 n = neigh_lookup(&arp_tbl, &dipn, dev);
1887 if (!n) {
1888 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1889 &dip);
1890 return;
1891 }
1892
1893 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1894 neigh_event_send(n, NULL);
1895 neigh_release(n);
1896}
1897
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001898#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001899static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1900 char *rauhtd_pl,
1901 int rec_index)
1902{
1903 struct net_device *dev;
1904 struct neighbour *n;
1905 struct in6_addr dip;
1906 u16 rif;
1907
1908 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1909 (char *) &dip);
1910
1911 if (!mlxsw_sp->router->rifs[rif]) {
1912 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1913 return;
1914 }
1915
1916 dev = mlxsw_sp->router->rifs[rif]->dev;
1917 n = neigh_lookup(&nd_tbl, &dip, dev);
1918 if (!n) {
1919 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1920 &dip);
1921 return;
1922 }
1923
1924 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1925 neigh_event_send(n, NULL);
1926 neigh_release(n);
1927}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001928#else
1929static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1930 char *rauhtd_pl,
1931 int rec_index)
1932{
1933}
1934#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001935
Yotam Gigic723c7352016-07-05 11:27:43 +02001936static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1937 char *rauhtd_pl,
1938 int rec_index)
1939{
1940 u8 num_entries;
1941 int i;
1942
1943 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1944 rec_index);
1945 /* Hardware starts counting at 0, so add 1. */
1946 num_entries++;
1947
1948 /* Each record consists of several neighbour entries. */
1949 for (i = 0; i < num_entries; i++) {
1950 int ent_index;
1951
1952 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1953 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1954 ent_index);
1955 }
1957}
1958
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001959static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1960 char *rauhtd_pl,
1961 int rec_index)
1962{
1963 /* One record contains one entry. */
1964 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1965 rec_index);
1966}
1967
Yotam Gigic723c7352016-07-05 11:27:43 +02001968static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1969 char *rauhtd_pl, int rec_index)
1970{
1971 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1972 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1973 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1974 rec_index);
1975 break;
1976 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001977 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1978 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001979 break;
1980 }
1981}
1982
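/* The dump is considered full, meaning another query is needed, when the
 * maximum number of records was returned and the last record is either an
 * IPv6 record or a completely filled IPv4 record.
 */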
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001983static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1984{
1985 u8 num_rec, last_rec_index, num_entries;
1986
1987 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1988 last_rec_index = num_rec - 1;
1989
1990 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1991 return false;
1992 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1993 MLXSW_REG_RAUHTD_TYPE_IPV6)
1994 return true;
1995
1996 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1997 last_rec_index);
1998 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1999 return true;
2000 return false;
2001}
2002
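/* Dump neighbour activity records of the given type from the device and
 * process each returned record, re-querying for as long as the response
 * indicates that more records may be pending.
 */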
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002003static int
2004__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2005 char *rauhtd_pl,
2006 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002007{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002008 int i, num_rec;
2009 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002010
2011 /* Make sure the neighbour's netdev isn't removed in the
2012 * process.
2013 */
2014 rtnl_lock();
2015 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002016 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002017 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2018 rauhtd_pl);
2019 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002020 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002021 break;
2022 }
2023 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2024 for (i = 0; i < num_rec; i++)
2025 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2026 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002027 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002028 rtnl_unlock();
2029
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002030 return err;
2031}
2032
2033static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2034{
2035 enum mlxsw_reg_rauhtd_type type;
2036 char *rauhtd_pl;
2037 int err;
2038
2039 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2040 if (!rauhtd_pl)
2041 return -ENOMEM;
2042
2043 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2044 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2045 if (err)
2046 goto out;
2047
2048 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2049 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2050out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002051 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002052 return err;
2053}
2054
2055static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2056{
2057 struct mlxsw_sp_neigh_entry *neigh_entry;
2058
 2059 /* Take the RTNL mutex here to prevent the lists from changing */
2060 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002061 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002062 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002063 /* If this neigh has nexthops, make the kernel think this neigh
2064 * is active regardless of the traffic.
2065 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002066 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002067 rtnl_unlock();
2068}
2069
2070static void
2071mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2072{
Ido Schimmel9011b672017-05-16 19:38:25 +02002073 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002074
Ido Schimmel9011b672017-05-16 19:38:25 +02002075 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002076 msecs_to_jiffies(interval));
2077}
2078
2079static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2080{
Ido Schimmel9011b672017-05-16 19:38:25 +02002081 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002082 int err;
2083
Ido Schimmel9011b672017-05-16 19:38:25 +02002084 router = container_of(work, struct mlxsw_sp_router,
2085 neighs_update.dw.work);
2086 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002087 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002088 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
Yotam Gigib2157142016-07-05 11:27:51 +02002089
Ido Schimmel9011b672017-05-16 19:38:25 +02002090 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002091
Ido Schimmel9011b672017-05-16 19:38:25 +02002092 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002093}
2094
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002095static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2096{
2097 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002098 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002099
Ido Schimmel9011b672017-05-16 19:38:25 +02002100 router = container_of(work, struct mlxsw_sp_router,
2101 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002102 /* Iterate over nexthop neighbours, find those that are unresolved and
 2103 * send ARP on them. This solves the chicken-and-egg problem where a
 2104 * nexthop would not get offloaded until its neighbour is resolved, but
 2105 * the neighbour would never get resolved as long as traffic is flowing
 2106 * in HW using a different nexthop.
 2107 *
 2108 * Take the RTNL mutex here to prevent the lists from changing.
2109 */
2110 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002111 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002112 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002113 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002114 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002115 rtnl_unlock();
2116
Ido Schimmel9011b672017-05-16 19:38:25 +02002117 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002118 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2119}
2120
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002121static void
2122mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2123 struct mlxsw_sp_neigh_entry *neigh_entry,
2124 bool removing);
2125
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002126static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002127{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002128 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2129 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2130}
2131
2132static void
2133mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2134 struct mlxsw_sp_neigh_entry *neigh_entry,
2135 enum mlxsw_reg_rauht_op op)
2136{
Jiri Pirko33b13412016-11-10 12:31:04 +01002137 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002138 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002139 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002140
2141 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2142 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002143 if (neigh_entry->counter_valid)
2144 mlxsw_reg_rauht_pack_counter(rauht_pl,
2145 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002146 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2147}
2148
2149static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002150mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2151 struct mlxsw_sp_neigh_entry *neigh_entry,
2152 enum mlxsw_reg_rauht_op op)
2153{
2154 struct neighbour *n = neigh_entry->key.n;
2155 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2156 const char *dip = n->primary_key;
2157
2158 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2159 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002160 if (neigh_entry->counter_valid)
2161 mlxsw_reg_rauht_pack_counter(rauht_pl,
2162 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002163 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2164}
2165
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002166bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002167{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002168 struct neighbour *n = neigh_entry->key.n;
2169
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002170 /* Packets with a link-local destination address are trapped
2171 * after LPM lookup and never reach the neighbour table, so
2172 * there is no need to program such neighbours to the device.
2173 */
2174 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2175 IPV6_ADDR_LINKLOCAL)
2176 return true;
2177 return false;
2178}
2179
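/* Add the neighbour to or remove it from the device host table, depending on
 * whether it became connected or disconnected. IPv6 link-local neighbours are
 * never programmed (see mlxsw_sp_neigh_ipv6_ignore()).
 */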
2180static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002181mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2182 struct mlxsw_sp_neigh_entry *neigh_entry,
2183 bool adding)
2184{
2185 if (!adding && !neigh_entry->connected)
2186 return;
2187 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002188 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002189 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2190 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002191 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002192 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002193 return;
2194 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2195 mlxsw_sp_rauht_op(adding));
2196 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002197 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002198 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002199}
2200
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002201void
2202mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2203 struct mlxsw_sp_neigh_entry *neigh_entry,
2204 bool adding)
2205{
2206 if (adding)
2207 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2208 else
2209 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2210 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2211}
2212
Ido Schimmelceb88812017-11-02 17:14:07 +01002213struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002214 struct work_struct work;
2215 struct mlxsw_sp *mlxsw_sp;
2216 struct neighbour *n;
2217};
2218
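/* Deferred handling of a neighbour update netevent: reflect the neighbour's
 * current state into the device, creating or destroying the corresponding
 * neigh entry as needed and updating the nexthops that use it.
 */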
2219static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2220{
Ido Schimmelceb88812017-11-02 17:14:07 +01002221 struct mlxsw_sp_netevent_work *net_work =
2222 container_of(work, struct mlxsw_sp_netevent_work, work);
2223 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002224 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002225 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002226 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002227 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002228 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002229
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002230 /* If these parameters are changed after we release the lock,
2231 * then we are guaranteed to receive another event letting us
2232 * know about it.
2233 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002234 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002235 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002236 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002237 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002238 read_unlock_bh(&n->lock);
2239
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002240 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002241 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002242 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2243 if (!entry_connected && !neigh_entry)
2244 goto out;
2245 if (!neigh_entry) {
2246 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2247 if (IS_ERR(neigh_entry))
2248 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002249 }
2250
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002251 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2252 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2253 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2254
2255 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2256 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2257
2258out:
2259 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002260 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002261 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002262}
2263
Ido Schimmel28678f02017-11-02 17:14:10 +01002264static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2265
2266static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2267{
2268 struct mlxsw_sp_netevent_work *net_work =
2269 container_of(work, struct mlxsw_sp_netevent_work, work);
2270 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2271
2272 mlxsw_sp_mp_hash_init(mlxsw_sp);
2273 kfree(net_work);
2274}
2275
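/* Netevent notifier. It runs in atomic context, so anything that may sleep is
 * deferred to a work item: neighbour updates and multipath hash
 * re-initialization. DELAY_PROBE_TIME changes only adjust the polling
 * interval and are handled inline.
 */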
2276static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002277 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002278{
Ido Schimmelceb88812017-11-02 17:14:07 +01002279 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002280 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002281 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002282 struct mlxsw_sp *mlxsw_sp;
2283 unsigned long interval;
2284 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002285 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002286 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002287
2288 switch (event) {
2289 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2290 p = ptr;
2291
2292 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002293 if (!p->dev || (p->tbl->family != AF_INET &&
2294 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002295 return NOTIFY_DONE;
2296
2297 /* We are in atomic context and can't take RTNL mutex,
2298 * so use RCU variant to walk the device chain.
2299 */
2300 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2301 if (!mlxsw_sp_port)
2302 return NOTIFY_DONE;
2303
2304 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2305 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002306 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002307
2308 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2309 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002310 case NETEVENT_NEIGH_UPDATE:
2311 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002312
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002313 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002314 return NOTIFY_DONE;
2315
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002316 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002317 if (!mlxsw_sp_port)
2318 return NOTIFY_DONE;
2319
Ido Schimmelceb88812017-11-02 17:14:07 +01002320 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2321 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002322 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002323 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002324 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002325
Ido Schimmelceb88812017-11-02 17:14:07 +01002326 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2327 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2328 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002329
 2330 /* Take a reference to ensure the neighbour won't be
 2331 * destroyed until we drop the reference in the delayed
 2332 * work.
2333 */
2334 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002335 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002336 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002337 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002338 case NETEVENT_MULTIPATH_HASH_UPDATE:
2339 net = ptr;
2340
2341 if (!net_eq(net, &init_net))
2342 return NOTIFY_DONE;
2343
2344 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2345 if (!net_work)
2346 return NOTIFY_BAD;
2347
2348 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2349 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2350 net_work->mlxsw_sp = router->mlxsw_sp;
2351 mlxsw_core_schedule_work(&net_work->work);
2352 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002353 }
2354
2355 return NOTIFY_DONE;
2356}
2357
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002358static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2359{
Yotam Gigic723c7352016-07-05 11:27:43 +02002360 int err;
2361
Ido Schimmel9011b672017-05-16 19:38:25 +02002362 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002363 &mlxsw_sp_neigh_ht_params);
2364 if (err)
2365 return err;
2366
2367 /* Initialize the polling interval according to the default
2368 * table.
2369 */
2370 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2371
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002372 /* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002373 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002374 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002375 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002376 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002377 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2378 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002379 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002380}
2381
2382static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2383{
Ido Schimmel9011b672017-05-16 19:38:25 +02002384 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2385 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2386 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002387}
2388
Ido Schimmel9665b742017-02-08 11:16:42 +01002389static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002390 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002391{
2392 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2393
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002394 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002395 rif_list_node) {
2396 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002397 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002398 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002399}
2400
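/* A nexthop either resolves to an Ethernet neighbour or points at an IPIP
 * tunnel entry.
 */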
Petr Machata35225e42017-09-02 23:49:22 +02002401enum mlxsw_sp_nexthop_type {
2402 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002403 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002404};
2405
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002406struct mlxsw_sp_nexthop_key {
2407 struct fib_nh *fib_nh;
2408};
2409
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002410struct mlxsw_sp_nexthop {
2411 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002412 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002413 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002414 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2415 * this belongs to
2416 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002417 struct rhash_head ht_node;
2418 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002419 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002420 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002421 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002422 int norm_nh_weight;
2423 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002424 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002425 u8 should_offload:1, /* set indicates this neigh is connected and
2426 * should be put to KVD linear area of this group.
2427 */
2428 offloaded:1, /* set in case the neigh is actually put into
2429 * KVD linear area of this group.
2430 */
2431 update:1; /* set indicates that MAC of this neigh should be
2432 * updated in HW
2433 */
Petr Machata35225e42017-09-02 23:49:22 +02002434 enum mlxsw_sp_nexthop_type type;
2435 union {
2436 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002437 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002438 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002439 unsigned int counter_index;
2440 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002441};
2442
2443struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002444 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002445 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002446 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002447 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002448 u8 adj_index_valid:1,
2449 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002450 u32 adj_index;
2451 u16 ecmp_size;
2452 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002453 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002454 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002455#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002456};
2457
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002458void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2459 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002460{
2461 struct devlink *devlink;
2462
2463 devlink = priv_to_devlink(mlxsw_sp->core);
2464 if (!devlink_dpipe_table_counter_enabled(devlink,
2465 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2466 return;
2467
2468 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2469 return;
2470
2471 nh->counter_valid = true;
2472}
2473
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002474void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2475 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002476{
2477 if (!nh->counter_valid)
2478 return;
2479 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2480 nh->counter_valid = false;
2481}
2482
2483int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2484 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2485{
2486 if (!nh->counter_valid)
2487 return -EINVAL;
2488
2489 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2490 p_counter, NULL);
2491}
2492
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002493struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2494 struct mlxsw_sp_nexthop *nh)
2495{
2496 if (!nh) {
2497 if (list_empty(&router->nexthop_list))
2498 return NULL;
2499 else
2500 return list_first_entry(&router->nexthop_list,
2501 typeof(*nh), router_list_node);
2502 }
2503 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2504 return NULL;
2505 return list_next_entry(nh, router_list_node);
2506}
2507
2508bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2509{
2510 return nh->offloaded;
2511}
2512
2513unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2514{
2515 if (!nh->offloaded)
2516 return NULL;
2517 return nh->neigh_entry->ha;
2518}
2519
2520int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002521 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002522{
2523 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2524 u32 adj_hash_index = 0;
2525 int i;
2526
2527 if (!nh->offloaded || !nh_grp->adj_index_valid)
2528 return -EINVAL;
2529
2530 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002531 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002532
2533 for (i = 0; i < nh_grp->count; i++) {
2534 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2535
2536 if (nh_iter == nh)
2537 break;
2538 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002539 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002540 }
2541
2542 *p_adj_hash_index = adj_hash_index;
2543 return 0;
2544}
2545
2546struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2547{
2548 return nh->rif;
2549}
2550
2551bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2552{
2553 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2554 int i;
2555
2556 for (i = 0; i < nh_grp->count; i++) {
2557 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2558
2559 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2560 return true;
2561 }
2562 return false;
2563}
2564
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002565static struct fib_info *
2566mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2567{
2568 return nh_grp->priv;
2569}
2570
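/* Nexthop groups are keyed in the hash table by the FIB info for IPv4 and by
 * the set of (gateway, ifindex) pairs of the route's nexthops for IPv6.
 */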
2571struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002572 enum mlxsw_sp_l3proto proto;
2573 union {
2574 struct fib_info *fi;
2575 struct mlxsw_sp_fib6_entry *fib6_entry;
2576 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002577};
2578
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002579static bool
2580mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2581 const struct in6_addr *gw, int ifindex)
2582{
2583 int i;
2584
2585 for (i = 0; i < nh_grp->count; i++) {
2586 const struct mlxsw_sp_nexthop *nh;
2587
2588 nh = &nh_grp->nexthops[i];
2589 if (nh->ifindex == ifindex &&
2590 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2591 return true;
2592 }
2593
2594 return false;
2595}
2596
2597static bool
2598mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2599 const struct mlxsw_sp_fib6_entry *fib6_entry)
2600{
2601 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2602
2603 if (nh_grp->count != fib6_entry->nrt6)
2604 return false;
2605
2606 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2607 struct in6_addr *gw;
2608 int ifindex;
2609
2610 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2611 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2612 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2613 return false;
2614 }
2615
2616 return true;
2617}
2618
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002619static int
2620mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2621{
2622 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2623 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2624
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002625 switch (cmp_arg->proto) {
2626 case MLXSW_SP_L3_PROTO_IPV4:
2627 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2628 case MLXSW_SP_L3_PROTO_IPV6:
2629 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2630 cmp_arg->fib6_entry);
2631 default:
2632 WARN_ON(1);
2633 return 1;
2634 }
2635}
2636
2637static int
2638mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2639{
2640 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002641}
2642
2643static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2644{
2645 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002646 const struct mlxsw_sp_nexthop *nh;
2647 struct fib_info *fi;
2648 unsigned int val;
2649 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002650
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002651 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2652 case AF_INET:
2653 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2654 return jhash(&fi, sizeof(fi), seed);
2655 case AF_INET6:
2656 val = nh_grp->count;
2657 for (i = 0; i < nh_grp->count; i++) {
2658 nh = &nh_grp->nexthops[i];
2659 val ^= nh->ifindex;
2660 }
2661 return jhash(&val, sizeof(val), seed);
2662 default:
2663 WARN_ON(1);
2664 return 0;
2665 }
2666}
2667
2668static u32
2669mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2670{
2671 unsigned int val = fib6_entry->nrt6;
2672 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2673 struct net_device *dev;
2674
2675 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2676 dev = mlxsw_sp_rt6->rt->dst.dev;
2677 val ^= dev->ifindex;
2678 }
2679
2680 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002681}
2682
2683static u32
2684mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2685{
2686 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2687
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002688 switch (cmp_arg->proto) {
2689 case MLXSW_SP_L3_PROTO_IPV4:
2690 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2691 case MLXSW_SP_L3_PROTO_IPV6:
2692 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2693 default:
2694 WARN_ON(1);
2695 return 0;
2696 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002697}
2698
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002699static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002700 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002701 .hashfn = mlxsw_sp_nexthop_group_hash,
2702 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2703 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002704};
2705
2706static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2707 struct mlxsw_sp_nexthop_group *nh_grp)
2708{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002709 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2710 !nh_grp->gateway)
2711 return 0;
2712
Ido Schimmel9011b672017-05-16 19:38:25 +02002713 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002714 &nh_grp->ht_node,
2715 mlxsw_sp_nexthop_group_ht_params);
2716}
2717
2718static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2719 struct mlxsw_sp_nexthop_group *nh_grp)
2720{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002721 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2722 !nh_grp->gateway)
2723 return;
2724
Ido Schimmel9011b672017-05-16 19:38:25 +02002725 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002726 &nh_grp->ht_node,
2727 mlxsw_sp_nexthop_group_ht_params);
2728}
2729
2730static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002731mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2732 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002733{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002734 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2735
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002736 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002737 cmp_arg.fi = fi;
2738 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2739 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002740 mlxsw_sp_nexthop_group_ht_params);
2741}
2742
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002743static struct mlxsw_sp_nexthop_group *
2744mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2745 struct mlxsw_sp_fib6_entry *fib6_entry)
2746{
2747 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2748
2749 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2750 cmp_arg.fib6_entry = fib6_entry;
2751 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2752 &cmp_arg,
2753 mlxsw_sp_nexthop_group_ht_params);
2754}
2755
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002756static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2757 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2758 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2759 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2760};
2761
2762static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2763 struct mlxsw_sp_nexthop *nh)
2764{
Ido Schimmel9011b672017-05-16 19:38:25 +02002765 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002766 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2767}
2768
2769static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2770 struct mlxsw_sp_nexthop *nh)
2771{
Ido Schimmel9011b672017-05-16 19:38:25 +02002772 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002773 mlxsw_sp_nexthop_ht_params);
2774}
2775
Ido Schimmelad178c82017-02-08 11:16:40 +01002776static struct mlxsw_sp_nexthop *
2777mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2778 struct mlxsw_sp_nexthop_key key)
2779{
Ido Schimmel9011b672017-05-16 19:38:25 +02002780 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002781 mlxsw_sp_nexthop_ht_params);
2782}
2783
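/* When a nexthop group is moved to a new block of adjacency entries, rewrite
 * the routes that reference the old adjacency index so that they use the new
 * one. This is done with one RALEU write per routing table that uses the
 * group.
 */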
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002784static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002785 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002786 u32 adj_index, u16 ecmp_size,
2787 u32 new_adj_index,
2788 u16 new_ecmp_size)
2789{
2790 char raleu_pl[MLXSW_REG_RALEU_LEN];
2791
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002792 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002793 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2794 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002795 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002796 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2797}
2798
2799static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2800 struct mlxsw_sp_nexthop_group *nh_grp,
2801 u32 old_adj_index, u16 old_ecmp_size)
2802{
2803 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002804 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002805 int err;
2806
2807 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002808 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002809 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002810 fib = fib_entry->fib_node->fib;
2811 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002812 old_adj_index,
2813 old_ecmp_size,
2814 nh_grp->adj_index,
2815 nh_grp->ecmp_size);
2816 if (err)
2817 return err;
2818 }
2819 return 0;
2820}
2821
Ido Schimmeleb789982017-10-22 23:11:48 +02002822static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2823 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002824{
2825 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2826 char ratr_pl[MLXSW_REG_RATR_LEN];
2827
2828 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002829 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2830 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002831 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002832 if (nh->counter_valid)
2833 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2834 else
2835 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2836
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002837 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2838}
2839
Ido Schimmeleb789982017-10-22 23:11:48 +02002840int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2841 struct mlxsw_sp_nexthop *nh)
2842{
2843 int i;
2844
2845 for (i = 0; i < nh->num_adj_entries; i++) {
2846 int err;
2847
2848 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2849 if (err)
2850 return err;
2851 }
2852
2853 return 0;
2854}
2855
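/* A nexthop occupies nh->num_adj_entries consecutive entries in its group's
 * adjacency block; mlxsw_sp_nexthop_update() writes the same Ethernet RATR
 * record to each of them, attaching the nexthop's flow counter when one was
 * allocated.
 */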
2856static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2857 u32 adj_index,
2858 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002859{
2860 const struct mlxsw_sp_ipip_ops *ipip_ops;
2861
2862 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2863 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2864}
2865
Ido Schimmeleb789982017-10-22 23:11:48 +02002866static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2867 u32 adj_index,
2868 struct mlxsw_sp_nexthop *nh)
2869{
2870 int i;
2871
2872 for (i = 0; i < nh->num_adj_entries; i++) {
2873 int err;
2874
2875 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2876 nh);
2877 if (err)
2878 return err;
2879 }
2880
2881 return 0;
2882}
2883
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002884static int
Petr Machata35225e42017-09-02 23:49:22 +02002885mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2886 struct mlxsw_sp_nexthop_group *nh_grp,
2887 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002888{
2889 u32 adj_index = nh_grp->adj_index; /* base */
2890 struct mlxsw_sp_nexthop *nh;
2891 int i;
2892 int err;
2893
2894 for (i = 0; i < nh_grp->count; i++) {
2895 nh = &nh_grp->nexthops[i];
2896
2897 if (!nh->should_offload) {
2898 nh->offloaded = 0;
2899 continue;
2900 }
2901
Ido Schimmela59b7e02017-01-23 11:11:42 +01002902 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002903 switch (nh->type) {
2904 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002905 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002906 (mlxsw_sp, adj_index, nh);
2907 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002908 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2909 err = mlxsw_sp_nexthop_ipip_update
2910 (mlxsw_sp, adj_index, nh);
2911 break;
Petr Machata35225e42017-09-02 23:49:22 +02002912 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002913 if (err)
2914 return err;
2915 nh->update = 0;
2916 nh->offloaded = 1;
2917 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002918 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002919 }
2920 return 0;
2921}
2922
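/* With reallocate == false only nexthops flagged for update are rewritten in
 * place; with reallocate == true the group has just moved to a new adjacency
 * index, so every offloadable nexthop is reprogrammed. Nexthops that should
 * not be offloaded are only marked as such and skipped.
 */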
Ido Schimmel1819ae32017-07-21 18:04:28 +02002923static bool
2924mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2925 const struct mlxsw_sp_fib_entry *fib_entry);
2926
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002927static int
2928mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2929 struct mlxsw_sp_nexthop_group *nh_grp)
2930{
2931 struct mlxsw_sp_fib_entry *fib_entry;
2932 int err;
2933
2934 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002935 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2936 fib_entry))
2937 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002938 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2939 if (err)
2940 return err;
2941 }
2942 return 0;
2943}
2944
2945static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002946mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2947 enum mlxsw_reg_ralue_op op, int err);
2948
2949static void
2950mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2951{
2952 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2953 struct mlxsw_sp_fib_entry *fib_entry;
2954
2955 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2956 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2957 fib_entry))
2958 continue;
2959 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2960 }
2961}
2962
Ido Schimmel425a08c2017-10-22 23:11:47 +02002963static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2964{
2965 /* Valid sizes for an adjacency group are:
2966 * 1-64, 512, 1024, 2048 and 4096.
2967 */
2968 if (*p_adj_grp_size <= 64)
2969 return;
2970 else if (*p_adj_grp_size <= 512)
2971 *p_adj_grp_size = 512;
2972 else if (*p_adj_grp_size <= 1024)
2973 *p_adj_grp_size = 1024;
2974 else if (*p_adj_grp_size <= 2048)
2975 *p_adj_grp_size = 2048;
2976 else
2977 *p_adj_grp_size = 4096;
2978}
2979
2980static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2981 unsigned int alloc_size)
2982{
2983 if (alloc_size >= 4096)
2984 *p_adj_grp_size = 4096;
2985 else if (alloc_size >= 2048)
2986 *p_adj_grp_size = 2048;
2987 else if (alloc_size >= 1024)
2988 *p_adj_grp_size = 1024;
2989 else if (alloc_size >= 512)
2990 *p_adj_grp_size = 512;
2991}
2992
2993static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2994 u16 *p_adj_grp_size)
2995{
2996 unsigned int alloc_size;
2997 int err;
2998
2999 /* Round up the requested group size to the next size supported
3000 * by the device and make sure the request can be satisfied.
3001 */
3002 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3003 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3004 &alloc_size);
3005 if (err)
3006 return err;
3007 /* It is possible the allocation results in more allocated
3008 * entries than requested. Try to use as much of them as
3009 * possible.
3010 */
3011 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3012
3013 return 0;
3014}
3015
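/* Illustrative example (the exact allocator granularity is hypothetical):
 * a request for 70 adjacency entries is first rounded up to 512, the next
 * valid group size. If the KVD linear allocator can only satisfy that with,
 * say, a 1024-entry area, the group size is then raised to 1024 so that the
 * rebalancing below can spread the nexthops over the whole allocation.
 */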
Ido Schimmel77d964e2017-08-02 09:56:05 +02003016static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003017mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3018{
3019 int i, g = 0, sum_norm_weight = 0;
3020 struct mlxsw_sp_nexthop *nh;
3021
3022 for (i = 0; i < nh_grp->count; i++) {
3023 nh = &nh_grp->nexthops[i];
3024
3025 if (!nh->should_offload)
3026 continue;
3027 if (g > 0)
3028 g = gcd(nh->nh_weight, g);
3029 else
3030 g = nh->nh_weight;
3031 }
3032
3033 for (i = 0; i < nh_grp->count; i++) {
3034 nh = &nh_grp->nexthops[i];
3035
3036 if (!nh->should_offload)
3037 continue;
3038 nh->norm_nh_weight = nh->nh_weight / g;
3039 sum_norm_weight += nh->norm_nh_weight;
3040 }
3041
3042 nh_grp->sum_norm_weight = sum_norm_weight;
3043}
3044
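/* For example, nexthop weights 6 and 9 have a GCD of 3 and normalize to 2
 * and 3, giving sum_norm_weight 5. Nexthops that cannot be offloaded do not
 * take part in the normalization.
 */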
3045static void
3046mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3047{
3048 int total = nh_grp->sum_norm_weight;
3049 u16 ecmp_size = nh_grp->ecmp_size;
3050 int i, weight = 0, lower_bound = 0;
3051
3052 for (i = 0; i < nh_grp->count; i++) {
3053 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3054 int upper_bound;
3055
3056 if (!nh->should_offload)
3057 continue;
3058 weight += nh->norm_nh_weight;
3059 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3060 nh->num_adj_entries = upper_bound - lower_bound;
3061 lower_bound = upper_bound;
3062 }
3063}
3064
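/* Continuing the example above: with ecmp_size 5, the first nexthop is given
 * adjacency entries [0, 2) and the second [2, 5), i.e. 2 and 3 entries,
 * matching the 2:3 weight ratio. When ecmp_size is rounded up to a larger
 * valid group size, DIV_ROUND_CLOSEST() keeps the split approximately
 * proportional.
 */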
3065static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003066mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3067 struct mlxsw_sp_nexthop_group *nh_grp)
3068{
Ido Schimmeleb789982017-10-22 23:11:48 +02003069 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003070 struct mlxsw_sp_nexthop *nh;
3071 bool offload_change = false;
3072 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003073 bool old_adj_index_valid;
3074 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003075 int i;
3076 int err;
3077
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003078 if (!nh_grp->gateway) {
3079 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3080 return;
3081 }
3082
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003083 for (i = 0; i < nh_grp->count; i++) {
3084 nh = &nh_grp->nexthops[i];
3085
Petr Machata56b8a9e2017-07-31 09:27:29 +02003086 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003087 offload_change = true;
3088 if (nh->should_offload)
3089 nh->update = 1;
3090 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003091 }
3092 if (!offload_change) {
3093 /* Nothing was added or removed, so no need to reallocate. Just
3094 * update MAC on existing adjacency indexes.
3095 */
Petr Machata35225e42017-09-02 23:49:22 +02003096 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003097 if (err) {
3098 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3099 goto set_trap;
3100 }
3101 return;
3102 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003103 mlxsw_sp_nexthop_group_normalize(nh_grp);
3104 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003105 /* No neigh of this group is connected so we just set
3106		 * the trap and let everything flow through the kernel.
3107 */
3108 goto set_trap;
3109
Ido Schimmeleb789982017-10-22 23:11:48 +02003110 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003111 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3112 if (err)
3113 /* No valid allocation size available. */
3114 goto set_trap;
3115
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003116 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3117 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003118 /* We ran out of KVD linear space, just set the
3119		 * trap and let everything flow through the kernel.
3120 */
3121 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3122 goto set_trap;
3123 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003124 old_adj_index_valid = nh_grp->adj_index_valid;
3125 old_adj_index = nh_grp->adj_index;
3126 old_ecmp_size = nh_grp->ecmp_size;
3127 nh_grp->adj_index_valid = 1;
3128 nh_grp->adj_index = adj_index;
3129 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003130 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003131 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003132 if (err) {
3133 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3134 goto set_trap;
3135 }
3136
3137 if (!old_adj_index_valid) {
3138 /* The trap was set for fib entries, so we have to call
3139 * fib entry update to unset it and use adjacency index.
3140 */
3141 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3142 if (err) {
3143 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3144 goto set_trap;
3145 }
3146 return;
3147 }
3148
3149 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3150 old_adj_index, old_ecmp_size);
3151 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3152 if (err) {
3153 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3154 goto set_trap;
3155 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003156
3157 /* Offload state within the group changed, so update the flags. */
3158 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3159
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003160 return;
3161
3162set_trap:
3163 old_adj_index_valid = nh_grp->adj_index_valid;
3164 nh_grp->adj_index_valid = 0;
3165 for (i = 0; i < nh_grp->count; i++) {
3166 nh = &nh_grp->nexthops[i];
3167 nh->offloaded = 0;
3168 }
3169 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3170 if (err)
3171 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3172 if (old_adj_index_valid)
3173 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3174}
3175
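/* Whenever mlxsw_sp_nexthop_group_refresh() cannot program the group (no
 * resolved nexthop, no KVD linear space, or a register write failure), it
 * falls back to the set_trap path: the adjacency index is invalidated, all
 * nexthops are marked as not offloaded and the FIB entries are re-written to
 * trap matching packets to the CPU.
 */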
3176static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3177 bool removing)
3178{
Petr Machata213666a2017-07-31 09:27:30 +02003179 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003180 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02003181 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003182 nh->should_offload = 0;
3183 nh->update = 1;
3184}
3185
3186static void
3187mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3188 struct mlxsw_sp_neigh_entry *neigh_entry,
3189 bool removing)
3190{
3191 struct mlxsw_sp_nexthop *nh;
3192
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003193 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3194 neigh_list_node) {
3195 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3196 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3197 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003198}
3199
Ido Schimmel9665b742017-02-08 11:16:42 +01003200static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003201 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003202{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003203 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003204 return;
3205
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003206 nh->rif = rif;
3207 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003208}
3209
3210static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3211{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003212 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003213 return;
3214
3215 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003216 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003217}
3218
Ido Schimmela8c97012017-02-08 11:16:35 +01003219static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3220 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003221{
3222 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003223 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003224 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003225 int err;
3226
Ido Schimmelad178c82017-02-08 11:16:40 +01003227 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003228 return 0;
3229
Jiri Pirko33b13412016-11-10 12:31:04 +01003230	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003231	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003232 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003233 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003234 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003235 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003236 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003237 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3238 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003239 if (IS_ERR(n))
3240 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003241 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003242 }
3243 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3244 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003245 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3246 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003247 err = -EINVAL;
3248 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003249 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003250 }
Yotam Gigib2157142016-07-05 11:27:51 +02003251
3252 /* If that is the first nexthop connected to that neigh, add to
3253 * nexthop_neighs_list
3254 */
3255 if (list_empty(&neigh_entry->nexthop_list))
3256 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003257 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003258
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003259 nh->neigh_entry = neigh_entry;
3260 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3261 read_lock_bh(&n->lock);
3262 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003263 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003264 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003265 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003266
3267 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003268
3269err_neigh_entry_create:
3270 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003271 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003272}
3273
Ido Schimmela8c97012017-02-08 11:16:35 +01003274static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3275 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003276{
3277 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003278 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003279
Ido Schimmelb8399a12017-02-08 11:16:33 +01003280 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003281 return;
3282 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003283
Ido Schimmel58312122016-12-23 09:32:50 +01003284 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003285 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003286 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003287
3288	/* If that is the last nexthop connected to that neigh, remove it from
3289 * nexthop_neighs_list
3290 */
Ido Schimmele58be792017-02-08 11:16:28 +01003291 if (list_empty(&neigh_entry->nexthop_list))
3292 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003293
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003294 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3295 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3296
3297 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003298}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003299
Petr Machata1012b9a2017-09-02 23:49:23 +02003300static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003301 struct mlxsw_sp_nexthop *nh,
3302 struct net_device *ol_dev)
3303{
3304 if (!nh->nh_grp->gateway || nh->ipip_entry)
3305 return 0;
3306
Petr Machata4cccb732017-10-16 16:26:39 +02003307 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3308 if (!nh->ipip_entry)
3309 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003310
3311 __mlxsw_sp_nexthop_neigh_update(nh, false);
3312 return 0;
3313}
3314
3315static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3316 struct mlxsw_sp_nexthop *nh)
3317{
3318 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3319
3320 if (!ipip_entry)
3321 return;
3322
3323 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003324 nh->ipip_entry = NULL;
3325}
3326
3327static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3328 const struct fib_nh *fib_nh,
3329 enum mlxsw_sp_ipip_type *p_ipipt)
3330{
3331 struct net_device *dev = fib_nh->nh_dev;
3332
3333 return dev &&
3334 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3335 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3336}
3337
Petr Machata35225e42017-09-02 23:49:22 +02003338static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3339 struct mlxsw_sp_nexthop *nh)
3340{
3341 switch (nh->type) {
3342 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3343 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3344 mlxsw_sp_nexthop_rif_fini(nh);
3345 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003346 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003347 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003348 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3349 break;
Petr Machata35225e42017-09-02 23:49:22 +02003350 }
3351}
3352
3353static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3354 struct mlxsw_sp_nexthop *nh,
3355 struct fib_nh *fib_nh)
3356{
Petr Machata1012b9a2017-09-02 23:49:23 +02003357 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003358 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003359 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003360 struct mlxsw_sp_rif *rif;
3361 int err;
3362
Petr Machata1012b9a2017-09-02 23:49:23 +02003363 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3364 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3365 MLXSW_SP_L3_PROTO_IPV4)) {
3366 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003367 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003368 if (err)
3369 return err;
3370 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3371 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003372 }
3373
Petr Machata35225e42017-09-02 23:49:22 +02003374 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3375 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3376 if (!rif)
3377 return 0;
3378
3379 mlxsw_sp_nexthop_rif_init(nh, rif);
3380 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3381 if (err)
3382 goto err_neigh_init;
3383
3384 return 0;
3385
3386err_neigh_init:
3387 mlxsw_sp_nexthop_rif_fini(nh);
3388 return err;
3389}
3390
3391static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3392 struct mlxsw_sp_nexthop *nh)
3393{
3394 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3395}
3396
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003397static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3398 struct mlxsw_sp_nexthop_group *nh_grp,
3399 struct mlxsw_sp_nexthop *nh,
3400 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003401{
3402 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003403 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003404 int err;
3405
3406 nh->nh_grp = nh_grp;
3407 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003408#ifdef CONFIG_IP_ROUTE_MULTIPATH
3409 nh->nh_weight = fib_nh->nh_weight;
3410#else
3411 nh->nh_weight = 1;
3412#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003413 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003414 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3415 if (err)
3416 return err;
3417
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003418 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003419 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3420
Ido Schimmel97989ee2017-03-10 08:53:38 +01003421 if (!dev)
3422 return 0;
3423
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003424 in_dev = __in_dev_get_rtnl(dev);
3425 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3426 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3427 return 0;
3428
Petr Machata35225e42017-09-02 23:49:22 +02003429 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003430 if (err)
3431 goto err_nexthop_neigh_init;
3432
3433 return 0;
3434
3435err_nexthop_neigh_init:
3436 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3437 return err;
3438}
3439
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003440static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3441 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003442{
Petr Machata35225e42017-09-02 23:49:22 +02003443 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003444 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003445 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003446 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003447}
3448
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003449static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3450 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003451{
3452 struct mlxsw_sp_nexthop_key key;
3453 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003454
Ido Schimmel9011b672017-05-16 19:38:25 +02003455 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003456 return;
3457
3458 key.fib_nh = fib_nh;
3459 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3460 if (WARN_ON_ONCE(!nh))
3461 return;
3462
Ido Schimmelad178c82017-02-08 11:16:40 +01003463 switch (event) {
3464 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003465 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003466 break;
3467 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003468 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003469 break;
3470 }
3471
3472 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3473}
3474
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003475static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3476 struct mlxsw_sp_rif *rif)
3477{
3478 struct mlxsw_sp_nexthop *nh;
3479
3480 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3481 __mlxsw_sp_nexthop_neigh_update(nh, false);
3482 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3483 }
3484}
3485
Ido Schimmel9665b742017-02-08 11:16:42 +01003486static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003487 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003488{
3489 struct mlxsw_sp_nexthop *nh, *tmp;
3490
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003491 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003492 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003493 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3494 }
3495}
3496
Petr Machata9b014512017-09-02 23:49:20 +02003497static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3498 const struct fib_info *fi)
3499{
Petr Machata1012b9a2017-09-02 23:49:23 +02003500 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3501 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003502}
3503
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003504static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003505mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003506{
3507 struct mlxsw_sp_nexthop_group *nh_grp;
3508 struct mlxsw_sp_nexthop *nh;
3509 struct fib_nh *fib_nh;
3510 size_t alloc_size;
3511 int i;
3512 int err;
3513
3514 alloc_size = sizeof(*nh_grp) +
3515 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3516 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3517 if (!nh_grp)
3518 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003519 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003520 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003521 nh_grp->neigh_tbl = &arp_tbl;
3522
Petr Machata9b014512017-09-02 23:49:20 +02003523 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003524 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003525 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003526 for (i = 0; i < nh_grp->count; i++) {
3527 nh = &nh_grp->nexthops[i];
3528 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003529 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003530 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003531 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003532 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003533 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3534 if (err)
3535 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003536 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3537 return nh_grp;
3538
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003539err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003540err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003541 for (i--; i >= 0; i--) {
3542 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003543 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003544 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003545 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003546 kfree(nh_grp);
3547 return ERR_PTR(err);
3548}
3549
3550static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003551mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3552 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003553{
3554 struct mlxsw_sp_nexthop *nh;
3555 int i;
3556
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003557 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003558 for (i = 0; i < nh_grp->count; i++) {
3559 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003560 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003561 }
Ido Schimmel58312122016-12-23 09:32:50 +01003562 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3563 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003564 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003565 kfree(nh_grp);
3566}
3567
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003568static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3569 struct mlxsw_sp_fib_entry *fib_entry,
3570 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003571{
3572 struct mlxsw_sp_nexthop_group *nh_grp;
3573
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003574 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003575 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003576 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003577 if (IS_ERR(nh_grp))
3578 return PTR_ERR(nh_grp);
3579 }
3580 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3581 fib_entry->nh_group = nh_grp;
3582 return 0;
3583}
3584
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003585static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3586 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003587{
3588 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3589
3590 list_del(&fib_entry->nexthop_group_node);
3591 if (!list_empty(&nh_grp->fib_list))
3592 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003593 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003594}
3595
Ido Schimmel013b20f2017-02-08 11:16:36 +01003596static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003597mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3598{
3599 struct mlxsw_sp_fib4_entry *fib4_entry;
3600
3601 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3602 common);
3603 return !fib4_entry->tos;
3604}
3605
3606static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003607mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3608{
3609 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3610
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003611 switch (fib_entry->fib_node->fib->proto) {
3612 case MLXSW_SP_L3_PROTO_IPV4:
3613 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3614 return false;
3615 break;
3616 case MLXSW_SP_L3_PROTO_IPV6:
3617 break;
3618 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003619
Ido Schimmel013b20f2017-02-08 11:16:36 +01003620 switch (fib_entry->type) {
3621 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3622 return !!nh_group->adj_index_valid;
3623 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003624 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003625 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3626 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003627 default:
3628 return false;
3629 }
3630}
3631
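/* Offload eligibility as implemented above: IPv4 entries are considered only
 * for tos 0; beyond that, remote entries need a valid adjacency index, local
 * entries need a RIF, IP-in-IP decap entries are always eligible and all
 * other types (e.g. traps) are not.
 */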
Ido Schimmel428b8512017-08-03 13:28:28 +02003632static struct mlxsw_sp_nexthop *
3633mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3634 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3635{
3636 int i;
3637
3638 for (i = 0; i < nh_grp->count; i++) {
3639 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3640 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3641
3642 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3643 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3644 &rt->rt6i_gateway))
3645 return nh;
3646 continue;
3647 }
3648
3649 return NULL;
3650}
3651
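/* mlxsw_sp_rt6_nexthop() maps a kernel rt6_info back to the group member
 * that serves it by matching the nexthop's RIF device and IPv6 gateway; it
 * is used below when reflecting per-route offload state to the kernel.
 */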
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003652static void
3653mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3654{
3655 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3656 int i;
3657
Petr Machata4607f6d2017-09-02 23:49:25 +02003658 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3659 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003660 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3661 return;
3662 }
3663
3664 for (i = 0; i < nh_grp->count; i++) {
3665 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3666
3667 if (nh->offloaded)
3668 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3669 else
3670 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3671 }
3672}
3673
3674static void
3675mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3676{
3677 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3678 int i;
3679
3680 for (i = 0; i < nh_grp->count; i++) {
3681 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3682
3683 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3684 }
3685}
3686
Ido Schimmel428b8512017-08-03 13:28:28 +02003687static void
3688mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3689{
3690 struct mlxsw_sp_fib6_entry *fib6_entry;
3691 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3692
3693 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3694 common);
3695
3696 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3697 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003698 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003699 return;
3700 }
3701
3702 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3703 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3704 struct mlxsw_sp_nexthop *nh;
3705
3706 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3707 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003708 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003709 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003710 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003711 }
3712}
3713
3714static void
3715mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3716{
3717 struct mlxsw_sp_fib6_entry *fib6_entry;
3718 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3719
3720 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3721 common);
3722 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3723 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3724
Ido Schimmelfe400792017-08-15 09:09:49 +02003725 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003726 }
3727}
3728
Ido Schimmel013b20f2017-02-08 11:16:36 +01003729static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3730{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003731 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003732 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003733 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003734 break;
3735 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003736 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3737 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003738 }
3739}
3740
3741static void
3742mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3743{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003744 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003745 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003746 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003747 break;
3748 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003749 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3750 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003751 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003752}
3753
3754static void
3755mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3756 enum mlxsw_reg_ralue_op op, int err)
3757{
3758 switch (op) {
3759 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003760 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3761 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3762 if (err)
3763 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003764 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003765 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003766 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003767 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3768 return;
3769 default:
3770 return;
3771 }
3772}
3773
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003774static void
3775mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3776 const struct mlxsw_sp_fib_entry *fib_entry,
3777 enum mlxsw_reg_ralue_op op)
3778{
3779 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3780 enum mlxsw_reg_ralxx_protocol proto;
3781 u32 *p_dip;
3782
3783 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3784
3785 switch (fib->proto) {
3786 case MLXSW_SP_L3_PROTO_IPV4:
3787 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3788 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3789 fib_entry->fib_node->key.prefix_len,
3790 *p_dip);
3791 break;
3792 case MLXSW_SP_L3_PROTO_IPV6:
3793 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3794 fib_entry->fib_node->key.prefix_len,
3795 fib_entry->fib_node->key.addr);
3796 break;
3797 }
3798}
3799
3800static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3801 struct mlxsw_sp_fib_entry *fib_entry,
3802 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003803{
3804 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003805 enum mlxsw_reg_ralue_trap_action trap_action;
3806 u16 trap_id = 0;
3807 u32 adjacency_index = 0;
3808 u16 ecmp_size = 0;
3809
3810 /* In case the nexthop group adjacency index is valid, use it
3811	 * with the provided ECMP size. Otherwise, set up a trap and pass
3812	 * traffic to the kernel.
3813 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003814 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003815 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3816 adjacency_index = fib_entry->nh_group->adj_index;
3817 ecmp_size = fib_entry->nh_group->ecmp_size;
3818 } else {
3819 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3820 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3821 }
3822
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003823 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003824 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3825 adjacency_index, ecmp_size);
3826 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3827}
3828
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003829static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3830 struct mlxsw_sp_fib_entry *fib_entry,
3831 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003832{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003833 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003834 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003835 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003836 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003837 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003838
3839 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3840 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003841 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003842 } else {
3843 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3844 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3845 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003846
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003847 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003848 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3849 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003850 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3851}
3852
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003853static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3854 struct mlxsw_sp_fib_entry *fib_entry,
3855 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003856{
3857 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003858
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003859 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003860 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3861 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3862}
3863
Petr Machata4607f6d2017-09-02 23:49:25 +02003864static int
3865mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3866 struct mlxsw_sp_fib_entry *fib_entry,
3867 enum mlxsw_reg_ralue_op op)
3868{
3869 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3870 const struct mlxsw_sp_ipip_ops *ipip_ops;
3871
3872 if (WARN_ON(!ipip_entry))
3873 return -EINVAL;
3874
3875 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3876 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3877 fib_entry->decap.tunnel_index);
3878}
3879
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003880static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3881 struct mlxsw_sp_fib_entry *fib_entry,
3882 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003883{
3884 switch (fib_entry->type) {
3885 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003886 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003887 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003888 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003889 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003890 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003891 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3892 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3893 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003894 }
3895 return -EINVAL;
3896}
3897
3898static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3899 struct mlxsw_sp_fib_entry *fib_entry,
3900 enum mlxsw_reg_ralue_op op)
3901{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003902 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003903
Ido Schimmel013b20f2017-02-08 11:16:36 +01003904 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003905
Ido Schimmel013b20f2017-02-08 11:16:36 +01003906 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003907}
3908
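/* Every RALUE write or delete issued through mlxsw_sp_fib_entry_op() is
 * followed by mlxsw_sp_fib_entry_offload_refresh(), so the RTNH_F_OFFLOAD
 * flags reported to the kernel always reflect the outcome of the last
 * hardware operation.
 */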
3909static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3910 struct mlxsw_sp_fib_entry *fib_entry)
3911{
Jiri Pirko7146da32016-09-01 10:37:41 +02003912 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3913 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003914}
3915
3916static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3917 struct mlxsw_sp_fib_entry *fib_entry)
3918{
3919 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3920 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3921}
3922
Jiri Pirko61c503f2016-07-04 08:23:11 +02003923static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003924mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3925 const struct fib_entry_notifier_info *fen_info,
3926 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003927{
Petr Machata4607f6d2017-09-02 23:49:25 +02003928 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3929 struct net_device *dev = fen_info->fi->fib_dev;
3930 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003931 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003932
Ido Schimmel97989ee2017-03-10 08:53:38 +01003933 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003934 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003935 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3936 MLXSW_SP_L3_PROTO_IPV4, dip);
3937 if (ipip_entry) {
3938 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3939 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3940 fib_entry,
3941 ipip_entry);
3942 }
3943 /* fall through */
3944 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003945 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3946 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003947 case RTN_UNREACHABLE: /* fall through */
3948 case RTN_BLACKHOLE: /* fall through */
3949 case RTN_PROHIBIT:
3950 /* Packets hitting these routes need to be trapped, but
3951 * can do so with a lower priority than packets directed
3952 * at the host, so use action type local instead of trap.
3953 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003954 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003955 return 0;
3956 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003957 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003958 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003959 else
3960 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003961 return 0;
3962 default:
3963 return -EINVAL;
3964 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003965}
3966
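/* Route type mapping above: RTN_LOCAL addresses that terminate an IP-in-IP
 * tunnel become decap entries, other local and broadcast routes are trapped
 * to the CPU, blackhole/unreachable/prohibit routes use the lower-priority
 * local action, and RTN_UNICAST routes are remote (forwarded via an
 * adjacency group) when they have a gateway or tunnel nexthop and local
 * otherwise.
 */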
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003967static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003968mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3969 struct mlxsw_sp_fib_node *fib_node,
3970 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003971{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003972 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003973 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003974 int err;
3975
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003976 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3977 if (!fib4_entry)
3978 return ERR_PTR(-ENOMEM);
3979 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003980
3981 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3982 if (err)
3983 goto err_fib4_entry_type_set;
3984
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003985 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003986 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003987 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003988
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003989 fib4_entry->prio = fen_info->fi->fib_priority;
3990 fib4_entry->tb_id = fen_info->tb_id;
3991 fib4_entry->type = fen_info->type;
3992 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003993
3994 fib_entry->fib_node = fib_node;
3995
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003996 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003997
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003998err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003999err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004000 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004001 return ERR_PTR(err);
4002}
4003
4004static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004005 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004006{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004007 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004008 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004009}
4010
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004011static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004012mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4013 const struct fib_entry_notifier_info *fen_info)
4014{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004015 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004016 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004017 struct mlxsw_sp_fib *fib;
4018 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004019
Ido Schimmel160e22a2017-07-18 10:10:20 +02004020 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4021 if (!vr)
4022 return NULL;
4023 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4024
4025 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4026 sizeof(fen_info->dst),
4027 fen_info->dst_len);
4028 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004029 return NULL;
4030
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004031 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4032 if (fib4_entry->tb_id == fen_info->tb_id &&
4033 fib4_entry->tos == fen_info->tos &&
4034 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004035 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4036 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004037 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004038 }
4039 }
4040
4041 return NULL;
4042}
4043
4044static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4045 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4046 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4047 .key_len = sizeof(struct mlxsw_sp_fib_key),
4048 .automatic_shrinking = true,
4049};
4050
4051static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4052 struct mlxsw_sp_fib_node *fib_node)
4053{
4054 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4055 mlxsw_sp_fib_ht_params);
4056}
4057
4058static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4059 struct mlxsw_sp_fib_node *fib_node)
4060{
4061 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4062 mlxsw_sp_fib_ht_params);
4063}
4064
4065static struct mlxsw_sp_fib_node *
4066mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4067 size_t addr_len, unsigned char prefix_len)
4068{
4069 struct mlxsw_sp_fib_key key;
4070
4071 memset(&key, 0, sizeof(key));
4072 memcpy(key.addr, addr, addr_len);
4073 key.prefix_len = prefix_len;
4074 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4075}
4076
4077static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004078mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079 size_t addr_len, unsigned char prefix_len)
4080{
4081 struct mlxsw_sp_fib_node *fib_node;
4082
4083 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4084 if (!fib_node)
4085 return NULL;
4086
4087 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004088 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004089 memcpy(fib_node->key.addr, addr, addr_len);
4090 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004091
4092 return fib_node;
4093}
4094
4095static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4096{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004097 list_del(&fib_node->list);
4098 WARN_ON(!list_empty(&fib_node->entry_list));
4099 kfree(fib_node);
4100}
4101
4102static bool
4103mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4104 const struct mlxsw_sp_fib_entry *fib_entry)
4105{
4106 return list_first_entry(&fib_node->entry_list,
4107 struct mlxsw_sp_fib_entry, list) == fib_entry;
4108}
4109
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004110static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4111 struct mlxsw_sp_fib *fib,
4112 struct mlxsw_sp_fib_node *fib_node)
4113{
4114 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
4115 struct mlxsw_sp_lpm_tree *lpm_tree;
4116 int err;
4117
4118 /* Since the tree is shared between all virtual routers we must
4119 * make sure it contains all the required prefix lengths. This
4120 * can be computed by either adding the new prefix length to the
4121 * existing prefix usage of a bound tree, or by aggregating the
4122 * prefix lengths across all virtual routers and adding the new
4123 * one as well.
4124 */
4125 if (fib->lpm_tree)
4126 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
4127 &fib->lpm_tree->prefix_usage);
4128 else
4129 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
4130 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4131
4132 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4133 fib->proto);
4134 if (IS_ERR(lpm_tree))
4135 return PTR_ERR(lpm_tree);
4136
4137 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
4138 return 0;
4139
4140 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4141 if (err)
4142 return err;
4143
4144 return 0;
4145}
4146
4147static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4148 struct mlxsw_sp_fib *fib)
4149{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004150 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
4151 return;
4152 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
4153 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
4154 fib->lpm_tree = NULL;
4155}
4156
Ido Schimmel9aecce12017-02-09 10:28:42 +01004157static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
4158{
4159 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004160 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004161
4162 if (fib->prefix_ref_count[prefix_len]++ == 0)
4163 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4164}
4165
4166static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4167{
4168 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004169 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004170
4171 if (--fib->prefix_ref_count[prefix_len] == 0)
4172 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4173}
4174
Ido Schimmel76610eb2017-03-10 08:53:41 +01004175static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4176 struct mlxsw_sp_fib_node *fib_node,
4177 struct mlxsw_sp_fib *fib)
4178{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004179 int err;
4180
4181 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4182 if (err)
4183 return err;
4184 fib_node->fib = fib;
4185
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004186 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
4187 if (err)
4188 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004189
4190 mlxsw_sp_fib_node_prefix_inc(fib_node);
4191
4192 return 0;
4193
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004194err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004195 fib_node->fib = NULL;
4196 mlxsw_sp_fib_node_remove(fib, fib_node);
4197 return err;
4198}
4199
4200static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4201 struct mlxsw_sp_fib_node *fib_node)
4202{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004203 struct mlxsw_sp_fib *fib = fib_node->fib;
4204
4205 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004206 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004207 fib_node->fib = NULL;
4208 mlxsw_sp_fib_node_remove(fib, fib_node);
4209}
4210
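/* Look up the FIB node for the given prefix, creating it and taking a
 * reference on the virtual router if it does not exist yet.
 */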
Ido Schimmel9aecce12017-02-09 10:28:42 +01004211static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004212mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4213 size_t addr_len, unsigned char prefix_len,
4214 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004215{
4216 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004217 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004218 struct mlxsw_sp_vr *vr;
4219 int err;
4220
David Ahernf8fa9b42017-10-18 09:56:56 -07004221 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004222 if (IS_ERR(vr))
4223 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004224 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004225
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004226 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004227 if (fib_node)
4228 return fib_node;
4229
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004230 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004231 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004232 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004233 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004234 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004235
Ido Schimmel76610eb2017-03-10 08:53:41 +01004236 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4237 if (err)
4238 goto err_fib_node_init;
4239
Ido Schimmel9aecce12017-02-09 10:28:42 +01004240 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004241
Ido Schimmel76610eb2017-03-10 08:53:41 +01004242err_fib_node_init:
4243 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004244err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004245 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004246 return ERR_PTR(err);
4247}
4248
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004249static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4250 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004251{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004252 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004253
Ido Schimmel9aecce12017-02-09 10:28:42 +01004254 if (!list_empty(&fib_node->entry_list))
4255 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004256 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004257 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004258 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004259}
4260
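/* Find the IPv4 entry before which the new entry should be inserted.
 * Entries within a node are ordered by table ID, TOS and priority.
 */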
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004261static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004262mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004263 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004264{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004265 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004266
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004267 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4268 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004269 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004270 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004271 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004273 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004274 if (fib4_entry->prio >= new4_entry->prio ||
4275 fib4_entry->tos < new4_entry->tos)
4276 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004277 }
4278
4279 return NULL;
4280}
4281
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004282static int
4283mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4284 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004285{
4286 struct mlxsw_sp_fib_node *fib_node;
4287
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004288 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004289 return -EINVAL;
4290
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004291 fib_node = fib4_entry->common.fib_node;
4292 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4293 common.list) {
4294 if (fib4_entry->tb_id != new4_entry->tb_id ||
4295 fib4_entry->tos != new4_entry->tos ||
4296 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004297 break;
4298 }
4299
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004300 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004301 return 0;
4302}
4303
Ido Schimmel9aecce12017-02-09 10:28:42 +01004304static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004305mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004306 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004307{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004308 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004309 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004310
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004311 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004312
Ido Schimmel4283bce2017-02-09 10:28:43 +01004313 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004314 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4315 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004316 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004317
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004318 /* Insert the new entry before the replaced one, so that we can
4319 * later remove the replaced entry.
4320 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004321 if (fib4_entry) {
4322 list_add_tail(&new4_entry->common.list,
4323 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004324 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004325 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004326
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004327 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4328 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004329 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004330 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004331 }
4332
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004333 if (fib4_entry)
4334 list_add(&new4_entry->common.list,
4335 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004336 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004337 list_add(&new4_entry->common.list,
4338 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004339 }
4340
4341 return 0;
4342}
4343
4344static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004345mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004346{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004347 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004348}
4349
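/* Write the entry to the device, but only if it is the first entry of
 * its FIB node; lower priority entries are not offloaded.
 */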
Ido Schimmel80c238f2017-07-18 10:10:29 +02004350static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4351 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004352{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004353 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4354
Ido Schimmel9aecce12017-02-09 10:28:42 +01004355 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4356 return 0;
4357
4358 /* To prevent packet loss, overwrite the previously offloaded
4359 * entry.
4360 */
4361 if (!list_is_singular(&fib_node->entry_list)) {
4362 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4363 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4364
4365 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4366 }
4367
4368 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4369}
4370
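/* Remove the entry from the device. If it was the first entry of its
 * FIB node, the next entry (if any) is promoted in its place.
 */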
Ido Schimmel80c238f2017-07-18 10:10:29 +02004371static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4372 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004373{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004374 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4375
Ido Schimmel9aecce12017-02-09 10:28:42 +01004376 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4377 return;
4378
4379 /* Promote the next entry by overwriting the deleted entry */
4380 if (!list_is_singular(&fib_node->entry_list)) {
4381 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4382 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4383
4384 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4385 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4386 return;
4387 }
4388
4389 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4390}
4391
4392static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004393 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004394 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004395{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004396 int err;
4397
Ido Schimmel9efbee62017-07-18 10:10:28 +02004398 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004399 if (err)
4400 return err;
4401
Ido Schimmel80c238f2017-07-18 10:10:29 +02004402 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004403 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004404 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004405
Ido Schimmel9aecce12017-02-09 10:28:42 +01004406 return 0;
4407
Ido Schimmel80c238f2017-07-18 10:10:29 +02004408err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004409 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004410 return err;
4411}
4412
4413static void
4414mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004415 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004416{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004417 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004418 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004419
4420 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4421 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004422}
4423
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004424static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004425 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004426 bool replace)
4427{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004428 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4429 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004430
4431 if (!replace)
4432 return;
4433
4434 /* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004435 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004436
4437 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4438 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004439 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004440}
4441
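/* Offload an IPv4 route: get (or create) the FIB node for the prefix,
 * create the route entry and link it into the node's entry list.
 */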
Ido Schimmel9aecce12017-02-09 10:28:42 +01004442static int
4443mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004444 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004445 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004446{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004447 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004448 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004449 int err;
4450
Ido Schimmel9011b672017-05-16 19:38:25 +02004451 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004452 return 0;
4453
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004454 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4455 &fen_info->dst, sizeof(fen_info->dst),
4456 fen_info->dst_len,
4457 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004458 if (IS_ERR(fib_node)) {
4459 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4460 return PTR_ERR(fib_node);
4461 }
4462
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004463 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4464 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004465 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004466 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004467 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004468 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004469
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004470 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004471 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004472 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004473 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4474 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004475 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004476
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004477 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004478
Jiri Pirko61c503f2016-07-04 08:23:11 +02004479 return 0;
4480
Ido Schimmel9aecce12017-02-09 10:28:42 +01004481err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004482 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004483err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004484 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004485 return err;
4486}
4487
Jiri Pirko37956d72016-10-20 16:05:43 +02004488static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4489 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004490{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004491 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004492 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004493
Ido Schimmel9011b672017-05-16 19:38:25 +02004494 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004495 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004496
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004497 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4498 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004499 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004500 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004501
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004502 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4503 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004504 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004505}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004506
Ido Schimmel428b8512017-08-03 13:28:28 +02004507static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4508{
4509 /* Packets with a link-local destination IP arriving at the router
4510 * are trapped to the CPU, so no need to program specific routes
4511 * for them.
4512 */
4513 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4514 return true;
4515
4516 /* Multicast routes aren't supported, so ignore them. Neighbour
4517 * Discovery packets are specifically trapped.
4518 */
4519 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4520 return true;
4521
4522 /* Cloned routes are irrelevant in the forwarding path. */
4523 if (rt->rt6i_flags & RTF_CACHE)
4524 return true;
4525
4526 return false;
4527}
4528
4529static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4530{
4531 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4532
4533 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4534 if (!mlxsw_sp_rt6)
4535 return ERR_PTR(-ENOMEM);
4536
4537 /* In case of route replace, the replaced route is deleted with
4538 * no notification. Take a reference to prevent accessing freed
4539 * memory.
4540 */
4541 mlxsw_sp_rt6->rt = rt;
4542 rt6_hold(rt);
4543
4544 return mlxsw_sp_rt6;
4545}
4546
4547#if IS_ENABLED(CONFIG_IPV6)
4548static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4549{
4550 rt6_release(rt);
4551}
4552#else
4553static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4554{
4555}
4556#endif
4557
4558static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4559{
4560 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4561 kfree(mlxsw_sp_rt6);
4562}
4563
4564static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4565{
4566 /* RTF_CACHE routes are ignored */
4567 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4568}
4569
4570static struct rt6_info *
4571mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4572{
4573 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4574 list)->rt;
4575}
4576
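/* Look for an existing IPv6 entry, matching on table ID and metric, that
 * the new route can be appended to as another nexthop.
 */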
4577static struct mlxsw_sp_fib6_entry *
4578mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004579 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004580{
4581 struct mlxsw_sp_fib6_entry *fib6_entry;
4582
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004583 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004584 return NULL;
4585
4586 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4587 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4588
4589 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4590 * virtual router.
4591 */
4592 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4593 continue;
4594 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4595 break;
4596 if (rt->rt6i_metric < nrt->rt6i_metric)
4597 continue;
4598 if (rt->rt6i_metric == nrt->rt6i_metric &&
4599 mlxsw_sp_fib6_rt_can_mp(rt))
4600 return fib6_entry;
4601 if (rt->rt6i_metric > nrt->rt6i_metric)
4602 break;
4603 }
4604
4605 return NULL;
4606}
4607
4608static struct mlxsw_sp_rt6 *
4609mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4610 const struct rt6_info *rt)
4611{
4612 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4613
4614 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4615 if (mlxsw_sp_rt6->rt == rt)
4616 return mlxsw_sp_rt6;
4617 }
4618
4619 return NULL;
4620}
4621
Petr Machata8f28a302017-09-02 23:49:24 +02004622static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4623 const struct rt6_info *rt,
4624 enum mlxsw_sp_ipip_type *ret)
4625{
4626 return rt->dst.dev &&
4627 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4628}
4629
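/* Initialize the nexthop type: IP-in-IP if the nexthop device is an
 * offloadable tunnel, otherwise an Ethernet nexthop bound to a RIF.
 */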
Petr Machata35225e42017-09-02 23:49:22 +02004630static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4631 struct mlxsw_sp_nexthop_group *nh_grp,
4632 struct mlxsw_sp_nexthop *nh,
4633 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004634{
Petr Machata8f28a302017-09-02 23:49:24 +02004635 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004636 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004637 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004638 struct mlxsw_sp_rif *rif;
4639 int err;
4640
Petr Machata8f28a302017-09-02 23:49:24 +02004641 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4642 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4643 MLXSW_SP_L3_PROTO_IPV6)) {
4644 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004645 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004646 if (err)
4647 return err;
4648 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4649 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004650 }
4651
Petr Machata35225e42017-09-02 23:49:22 +02004652 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004653 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4654 if (!rif)
4655 return 0;
4656 mlxsw_sp_nexthop_rif_init(nh, rif);
4657
4658 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4659 if (err)
4660 goto err_nexthop_neigh_init;
4661
4662 return 0;
4663
4664err_nexthop_neigh_init:
4665 mlxsw_sp_nexthop_rif_fini(nh);
4666 return err;
4667}
4668
Petr Machata35225e42017-09-02 23:49:22 +02004669static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4670 struct mlxsw_sp_nexthop *nh)
4671{
4672 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4673}
4674
4675static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4676 struct mlxsw_sp_nexthop_group *nh_grp,
4677 struct mlxsw_sp_nexthop *nh,
4678 const struct rt6_info *rt)
4679{
4680 struct net_device *dev = rt->dst.dev;
4681
4682 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004683 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004684 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004685 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004686
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004687 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4688
Petr Machata35225e42017-09-02 23:49:22 +02004689 if (!dev)
4690 return 0;
4691 nh->ifindex = dev->ifindex;
4692
4693 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4694}
4695
Ido Schimmel428b8512017-08-03 13:28:28 +02004696static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4697 struct mlxsw_sp_nexthop *nh)
4698{
Petr Machata35225e42017-09-02 23:49:22 +02004699 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004700 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004701 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004702}
4703
Petr Machataf6050ee2017-09-02 23:49:21 +02004704static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4705 const struct rt6_info *rt)
4706{
Petr Machata8f28a302017-09-02 23:49:24 +02004707 return rt->rt6i_flags & RTF_GATEWAY ||
4708 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004709}
4710
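/* Create a nexthop group for an IPv6 entry, with one nexthop per route
 * in the entry's rt6 list, and insert it into the group hash table.
 */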
Ido Schimmel428b8512017-08-03 13:28:28 +02004711static struct mlxsw_sp_nexthop_group *
4712mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4713 struct mlxsw_sp_fib6_entry *fib6_entry)
4714{
4715 struct mlxsw_sp_nexthop_group *nh_grp;
4716 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4717 struct mlxsw_sp_nexthop *nh;
4718 size_t alloc_size;
4719 int i = 0;
4720 int err;
4721
4722 alloc_size = sizeof(*nh_grp) +
4723 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4724 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4725 if (!nh_grp)
4726 return ERR_PTR(-ENOMEM);
4727 INIT_LIST_HEAD(&nh_grp->fib_list);
4728#if IS_ENABLED(CONFIG_IPV6)
4729 nh_grp->neigh_tbl = &nd_tbl;
4730#endif
4731 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4732 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004733 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004734 nh_grp->count = fib6_entry->nrt6;
4735 for (i = 0; i < nh_grp->count; i++) {
4736 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4737
4738 nh = &nh_grp->nexthops[i];
4739 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4740 if (err)
4741 goto err_nexthop6_init;
4742 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4743 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004744
4745 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4746 if (err)
4747 goto err_nexthop_group_insert;
4748
Ido Schimmel428b8512017-08-03 13:28:28 +02004749 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4750 return nh_grp;
4751
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004752err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004753err_nexthop6_init:
4754 for (i--; i >= 0; i--) {
4755 nh = &nh_grp->nexthops[i];
4756 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4757 }
4758 kfree(nh_grp);
4759 return ERR_PTR(err);
4760}
4761
4762static void
4763mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4764 struct mlxsw_sp_nexthop_group *nh_grp)
4765{
4766 struct mlxsw_sp_nexthop *nh;
4767 int i = nh_grp->count;
4768
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004769 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004770 for (i--; i >= 0; i--) {
4771 nh = &nh_grp->nexthops[i];
4772 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4773 }
4774 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4775 WARN_ON(nh_grp->adj_index_valid);
4776 kfree(nh_grp);
4777}
4778
4779static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4780 struct mlxsw_sp_fib6_entry *fib6_entry)
4781{
4782 struct mlxsw_sp_nexthop_group *nh_grp;
4783
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004784 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4785 if (!nh_grp) {
4786 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4787 if (IS_ERR(nh_grp))
4788 return PTR_ERR(nh_grp);
4789 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004790
4791 list_add_tail(&fib6_entry->common.nexthop_group_node,
4792 &nh_grp->fib_list);
4793 fib6_entry->common.nh_group = nh_grp;
4794
4795 return 0;
4796}
4797
4798static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4799 struct mlxsw_sp_fib_entry *fib_entry)
4800{
4801 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4802
4803 list_del(&fib_entry->nexthop_group_node);
4804 if (!list_empty(&nh_grp->fib_list))
4805 return;
4806 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4807}
4808
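/* Move the IPv6 entry to a nexthop group matching its current set of
 * nexthops and rewrite the entry in the device accordingly.
 */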
4809static int
4810mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4811 struct mlxsw_sp_fib6_entry *fib6_entry)
4812{
4813 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4814 int err;
4815
4816 fib6_entry->common.nh_group = NULL;
4817 list_del(&fib6_entry->common.nexthop_group_node);
4818
4819 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4820 if (err)
4821 goto err_nexthop6_group_get;
4822
4823 /* If this entry is offloaded, the adjacency index
4824 * currently associated with it in the device's table is that
4825 * of the old group. Start using the new one instead.
4826 */
4827 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4828 if (err)
4829 goto err_fib_node_entry_add;
4830
4831 if (list_empty(&old_nh_grp->fib_list))
4832 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4833
4834 return 0;
4835
4836err_fib_node_entry_add:
4837 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4838err_nexthop6_group_get:
4839 list_add_tail(&fib6_entry->common.nexthop_group_node,
4840 &old_nh_grp->fib_list);
4841 fib6_entry->common.nh_group = old_nh_grp;
4842 return err;
4843}
4844
4845static int
4846mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4847 struct mlxsw_sp_fib6_entry *fib6_entry,
4848 struct rt6_info *rt)
4849{
4850 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4851 int err;
4852
4853 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4854 if (IS_ERR(mlxsw_sp_rt6))
4855 return PTR_ERR(mlxsw_sp_rt6);
4856
4857 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4858 fib6_entry->nrt6++;
4859
4860 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4861 if (err)
4862 goto err_nexthop6_group_update;
4863
4864 return 0;
4865
4866err_nexthop6_group_update:
4867 fib6_entry->nrt6--;
4868 list_del(&mlxsw_sp_rt6->list);
4869 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4870 return err;
4871}
4872
4873static void
4874mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4875 struct mlxsw_sp_fib6_entry *fib6_entry,
4876 struct rt6_info *rt)
4877{
4878 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4879
4880 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4881 if (WARN_ON(!mlxsw_sp_rt6))
4882 return;
4883
4884 fib6_entry->nrt6--;
4885 list_del(&mlxsw_sp_rt6->list);
4886 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4887 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4888}
4889
Petr Machataf6050ee2017-09-02 23:49:21 +02004890static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4891 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004892 const struct rt6_info *rt)
4893{
4894 /* Packets hitting RTF_REJECT routes need to be discarded by the
4895 * stack. We can rely on their destination device not having a
4896 * RIF (it's the loopback device) and can thus use action type
4897 * local, which will cause them to be trapped with a lower
4898 * priority than packets that need to be locally received.
4899 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004900 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004901 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4902 else if (rt->rt6i_flags & RTF_REJECT)
4903 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004904 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004905 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4906 else
4907 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4908}
4909
4910static void
4911mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4912{
4913 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4914
4915 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4916 list) {
4917 fib6_entry->nrt6--;
4918 list_del(&mlxsw_sp_rt6->list);
4919 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4920 }
4921}
4922
4923static struct mlxsw_sp_fib6_entry *
4924mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4925 struct mlxsw_sp_fib_node *fib_node,
4926 struct rt6_info *rt)
4927{
4928 struct mlxsw_sp_fib6_entry *fib6_entry;
4929 struct mlxsw_sp_fib_entry *fib_entry;
4930 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4931 int err;
4932
4933 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4934 if (!fib6_entry)
4935 return ERR_PTR(-ENOMEM);
4936 fib_entry = &fib6_entry->common;
4937
4938 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4939 if (IS_ERR(mlxsw_sp_rt6)) {
4940 err = PTR_ERR(mlxsw_sp_rt6);
4941 goto err_rt6_create;
4942 }
4943
Petr Machataf6050ee2017-09-02 23:49:21 +02004944 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004945
4946 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4947 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4948 fib6_entry->nrt6 = 1;
4949 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4950 if (err)
4951 goto err_nexthop6_group_get;
4952
4953 fib_entry->fib_node = fib_node;
4954
4955 return fib6_entry;
4956
4957err_nexthop6_group_get:
4958 list_del(&mlxsw_sp_rt6->list);
4959 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4960err_rt6_create:
4961 kfree(fib6_entry);
4962 return ERR_PTR(err);
4963}
4964
4965static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4966 struct mlxsw_sp_fib6_entry *fib6_entry)
4967{
4968 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4969 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4970 WARN_ON(fib6_entry->nrt6);
4971 kfree(fib6_entry);
4972}
4973
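/* Find the IPv6 entry before which the new entry should be inserted,
 * ordered by table ID and metric. In case of replace, prefer an entry
 * with the same metric.
 */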
4974static struct mlxsw_sp_fib6_entry *
4975mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004976 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004977{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004978 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004979
4980 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4981 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4982
4983 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4984 continue;
4985 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4986 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004987 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4988 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4989 mlxsw_sp_fib6_rt_can_mp(nrt))
4990 return fib6_entry;
4991 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4992 fallback = fallback ?: fib6_entry;
4993 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004994 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004995 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004996 }
4997
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004998 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004999}
5000
5001static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005002mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5003 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005004{
5005 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5006 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5007 struct mlxsw_sp_fib6_entry *fib6_entry;
5008
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005009 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5010
5011 if (replace && WARN_ON(!fib6_entry))
5012 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005013
5014 if (fib6_entry) {
5015 list_add_tail(&new6_entry->common.list,
5016 &fib6_entry->common.list);
5017 } else {
5018 struct mlxsw_sp_fib6_entry *last;
5019
5020 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5021 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5022
5023 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5024 break;
5025 fib6_entry = last;
5026 }
5027
5028 if (fib6_entry)
5029 list_add(&new6_entry->common.list,
5030 &fib6_entry->common.list);
5031 else
5032 list_add(&new6_entry->common.list,
5033 &fib_node->entry_list);
5034 }
5035
5036 return 0;
5037}
5038
5039static void
5040mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5041{
5042 list_del(&fib6_entry->common.list);
5043}
5044
5045static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005046 struct mlxsw_sp_fib6_entry *fib6_entry,
5047 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005048{
5049 int err;
5050
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005051 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005052 if (err)
5053 return err;
5054
5055 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5056 if (err)
5057 goto err_fib_node_entry_add;
5058
5059 return 0;
5060
5061err_fib_node_entry_add:
5062 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5063 return err;
5064}
5065
5066static void
5067mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5068 struct mlxsw_sp_fib6_entry *fib6_entry)
5069{
5070 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5071 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5072}
5073
5074static struct mlxsw_sp_fib6_entry *
5075mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5076 const struct rt6_info *rt)
5077{
5078 struct mlxsw_sp_fib6_entry *fib6_entry;
5079 struct mlxsw_sp_fib_node *fib_node;
5080 struct mlxsw_sp_fib *fib;
5081 struct mlxsw_sp_vr *vr;
5082
5083 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5084 if (!vr)
5085 return NULL;
5086 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5087
5088 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5089 sizeof(rt->rt6i_dst.addr),
5090 rt->rt6i_dst.plen);
5091 if (!fib_node)
5092 return NULL;
5093
5094 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5095 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5096
5097 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5098 rt->rt6i_metric == iter_rt->rt6i_metric &&
5099 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5100 return fib6_entry;
5101 }
5102
5103 return NULL;
5104}
5105
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005106static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5107 struct mlxsw_sp_fib6_entry *fib6_entry,
5108 bool replace)
5109{
5110 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5111 struct mlxsw_sp_fib6_entry *replaced;
5112
5113 if (!replace)
5114 return;
5115
5116 replaced = list_next_entry(fib6_entry, common.list);
5117
5118 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5119 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5120 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5121}
5122
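/* Offload an IPv6 route: either append it to an existing multipath entry
 * or create a new entry under its FIB node.
 */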
Ido Schimmel428b8512017-08-03 13:28:28 +02005123static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005124 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005125{
5126 struct mlxsw_sp_fib6_entry *fib6_entry;
5127 struct mlxsw_sp_fib_node *fib_node;
5128 int err;
5129
5130 if (mlxsw_sp->router->aborted)
5131 return 0;
5132
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005133 if (rt->rt6i_src.plen)
5134 return -EINVAL;
5135
Ido Schimmel428b8512017-08-03 13:28:28 +02005136 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5137 return 0;
5138
5139 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5140 &rt->rt6i_dst.addr,
5141 sizeof(rt->rt6i_dst.addr),
5142 rt->rt6i_dst.plen,
5143 MLXSW_SP_L3_PROTO_IPV6);
5144 if (IS_ERR(fib_node))
5145 return PTR_ERR(fib_node);
5146
5147 /* Before creating a new entry, try to append the route to an existing
5148 * multipath entry.
5149 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005150 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005151 if (fib6_entry) {
5152 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5153 if (err)
5154 goto err_fib6_entry_nexthop_add;
5155 return 0;
5156 }
5157
5158 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5159 if (IS_ERR(fib6_entry)) {
5160 err = PTR_ERR(fib6_entry);
5161 goto err_fib6_entry_create;
5162 }
5163
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005164 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005165 if (err)
5166 goto err_fib6_node_entry_link;
5167
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005168 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5169
Ido Schimmel428b8512017-08-03 13:28:28 +02005170 return 0;
5171
5172err_fib6_node_entry_link:
5173 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5174err_fib6_entry_create:
5175err_fib6_entry_nexthop_add:
5176 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5177 return err;
5178}
5179
5180static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5181 struct rt6_info *rt)
5182{
5183 struct mlxsw_sp_fib6_entry *fib6_entry;
5184 struct mlxsw_sp_fib_node *fib_node;
5185
5186 if (mlxsw_sp->router->aborted)
5187 return;
5188
5189 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5190 return;
5191
5192 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5193 if (WARN_ON(!fib6_entry))
5194 return;
5195
5196 /* If the route is part of a multipath entry, but is not the last
5197 * one removed, then only shrink its nexthop group.
5198 */
5199 if (!list_is_singular(&fib6_entry->rt6_list)) {
5200 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5201 return;
5202 }
5203
5204 fib_node = fib6_entry->common.fib_node;
5205
5206 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5207 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5208 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5209}
5210
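/* Program a minimal LPM tree and a default route that traps packets of
 * the given protocol to the CPU, used once FIB offload was aborted.
 */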
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005211static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5212 enum mlxsw_reg_ralxx_protocol proto,
5213 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005214{
5215 char ralta_pl[MLXSW_REG_RALTA_LEN];
5216 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005217 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005218
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005219 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005220 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5221 if (err)
5222 return err;
5223
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005224 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005225 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5226 if (err)
5227 return err;
5228
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005229 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005230 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005231 char raltb_pl[MLXSW_REG_RALTB_LEN];
5232 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005233
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005234 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005235 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5236 raltb_pl);
5237 if (err)
5238 return err;
5239
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005240 mlxsw_reg_ralue_pack(ralue_pl, proto,
5241 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005242 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5243 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5244 ralue_pl);
5245 if (err)
5246 return err;
5247 }
5248
5249 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005250}
5251
Yotam Gigid42b0962017-09-27 08:23:20 +02005252static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5253 struct mfc_entry_notifier_info *men_info,
5254 bool replace)
5255{
5256 struct mlxsw_sp_vr *vr;
5257
5258 if (mlxsw_sp->router->aborted)
5259 return 0;
5260
David Ahernf8fa9b42017-10-18 09:56:56 -07005261 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005262 if (IS_ERR(vr))
5263 return PTR_ERR(vr);
5264
5265 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5266}
5267
5268static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5269 struct mfc_entry_notifier_info *men_info)
5270{
5271 struct mlxsw_sp_vr *vr;
5272
5273 if (mlxsw_sp->router->aborted)
5274 return;
5275
5276 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5277 if (WARN_ON(!vr))
5278 return;
5279
5280 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5281 mlxsw_sp_vr_put(vr);
5282}
5283
5284static int
5285mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5286 struct vif_entry_notifier_info *ven_info)
5287{
5288 struct mlxsw_sp_rif *rif;
5289 struct mlxsw_sp_vr *vr;
5290
5291 if (mlxsw_sp->router->aborted)
5292 return 0;
5293
David Ahernf8fa9b42017-10-18 09:56:56 -07005294 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005295 if (IS_ERR(vr))
5296 return PTR_ERR(vr);
5297
5298 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5299 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5300 ven_info->vif_index,
5301 ven_info->vif_flags, rif);
5302}
5303
5304static void
5305mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5306 struct vif_entry_notifier_info *ven_info)
5307{
5308 struct mlxsw_sp_vr *vr;
5309
5310 if (mlxsw_sp->router->aborted)
5311 return;
5312
5313 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5314 if (WARN_ON(!vr))
5315 return;
5316
5317 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5318 mlxsw_sp_vr_put(vr);
5319}
5320
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005321static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5322{
5323 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5324 int err;
5325
5326 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5327 MLXSW_SP_LPM_TREE_MIN);
5328 if (err)
5329 return err;
5330
Yotam Gigid42b0962017-09-27 08:23:20 +02005331 /* The multicast router code does not need an abort trap since, by default,
5332 * packets that don't match any routes are trapped to the CPU.
5333 */
5334
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005335 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5336 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5337 MLXSW_SP_LPM_TREE_MIN + 1);
5338}
5339
Ido Schimmel9aecce12017-02-09 10:28:42 +01005340static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5341 struct mlxsw_sp_fib_node *fib_node)
5342{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005343 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005344
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005345 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5346 common.list) {
5347 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005348
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005349 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5350 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005351 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005352 /* Break when the entry list is empty and the node was freed.
5353 * Otherwise, we'll access freed memory in the next
5354 * iteration.
5355 */
5356 if (do_break)
5357 break;
5358 }
5359}
5360
Ido Schimmel428b8512017-08-03 13:28:28 +02005361static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5362 struct mlxsw_sp_fib_node *fib_node)
5363{
5364 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5365
5366 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5367 common.list) {
5368 bool do_break = &tmp->common.list == &fib_node->entry_list;
5369
5370 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5371 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5372 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5373 if (do_break)
5374 break;
5375 }
5376}
5377
Ido Schimmel9aecce12017-02-09 10:28:42 +01005378static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5379 struct mlxsw_sp_fib_node *fib_node)
5380{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005381 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005382 case MLXSW_SP_L3_PROTO_IPV4:
5383 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5384 break;
5385 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005386 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005387 break;
5388 }
5389}
5390
Ido Schimmel76610eb2017-03-10 08:53:41 +01005391static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5392 struct mlxsw_sp_vr *vr,
5393 enum mlxsw_sp_l3proto proto)
5394{
5395 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5396 struct mlxsw_sp_fib_node *fib_node, *tmp;
5397
5398 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5399 bool do_break = &tmp->list == &fib->node_list;
5400
5401 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5402 if (do_break)
5403 break;
5404 }
5405}
5406
Ido Schimmelac571de2016-11-14 11:26:32 +01005407static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005408{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005409 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005410
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005411 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005412 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005413
Ido Schimmel76610eb2017-03-10 08:53:41 +01005414 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005415 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005416
5417 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005418 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005419
5420 /* If the virtual router was only used for IPv4, then it's no
5421 * longer used.
5422 */
5423 if (!mlxsw_sp_vr_is_used(vr))
5424 continue;
5425 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005426 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005427}
5428
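/* Stop offloading routes: flush everything that was already offloaded
 * and trap routed packets to the CPU so forwarding continues in software.
 */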
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005429static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005430{
5431 int err;
5432
Ido Schimmel9011b672017-05-16 19:38:25 +02005433 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005434 return;
5435 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005436 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005437 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005438 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5439 if (err)
5440 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5441}
5442
Ido Schimmel30572242016-12-03 16:45:01 +01005443struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005444 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005445 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005446 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005447 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005448 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005449 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005450 struct mfc_entry_notifier_info men_info;
5451 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005452 };
Ido Schimmel30572242016-12-03 16:45:01 +01005453 struct mlxsw_sp *mlxsw_sp;
5454 unsigned long event;
5455};
5456
Ido Schimmel66a57632017-08-03 13:28:26 +02005457static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005458{
Ido Schimmel30572242016-12-03 16:45:01 +01005459 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005460 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005461 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005462 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005463 int err;
5464
Ido Schimmel30572242016-12-03 16:45:01 +01005465 /* Protect internal structures from changes */
5466 rtnl_lock();
5467 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005468 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005469 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005470 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005471 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005472 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5473 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005474 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005475 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005476 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005477 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005478 break;
5479 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005480 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5481 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005482 break;
David Ahern1f279232017-10-27 17:37:14 -07005483 case FIB_EVENT_RULE_ADD:
5484 /* If we get here, a rule was added that we do not support,
5485 * so abort FIB offloading.
5486 */
5487 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005488 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005489 case FIB_EVENT_NH_ADD: /* fall through */
5490 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005491 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5492 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005493 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5494 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005495 }
Ido Schimmel30572242016-12-03 16:45:01 +01005496 rtnl_unlock();
5497 kfree(fib_work);
5498}
5499
Ido Schimmel66a57632017-08-03 13:28:26 +02005500static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5501{
Ido Schimmel583419f2017-08-03 13:28:27 +02005502 struct mlxsw_sp_fib_event_work *fib_work =
5503 container_of(work, struct mlxsw_sp_fib_event_work, work);
5504 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005505 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005506 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005507
5508 rtnl_lock();
5509 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005510 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005511 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005512 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005513 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005514 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005515 if (err)
5516 mlxsw_sp_router_fib_abort(mlxsw_sp);
5517 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5518 break;
5519 case FIB_EVENT_ENTRY_DEL:
5520 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5521 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5522 break;
David Ahern1f279232017-10-27 17:37:14 -07005523 case FIB_EVENT_RULE_ADD:
5524 /* If we get here, a rule was added that we do not support.
5525 * Just abort the FIB offload.
5526 */
5527 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005528 break;
5529 }
5530 rtnl_unlock();
5531 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005532}
5533
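/* Deferred handler for IPv4 multicast (MFC and VIF) events. Runs in process
 * context under RTNL; the references taken when the event was queued are
 * dropped once the hardware has been updated.
 */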
Yotam Gigid42b0962017-09-27 08:23:20 +02005534static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5535{
5536 struct mlxsw_sp_fib_event_work *fib_work =
5537 container_of(work, struct mlxsw_sp_fib_event_work, work);
5538 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005539 bool replace;
5540 int err;
5541
5542 rtnl_lock();
5543 switch (fib_work->event) {
5544 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5545 case FIB_EVENT_ENTRY_ADD:
5546 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5547
5548 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5549 replace);
5550 if (err)
5551 mlxsw_sp_router_fib_abort(mlxsw_sp);
5552 ipmr_cache_put(fib_work->men_info.mfc);
5553 break;
5554 case FIB_EVENT_ENTRY_DEL:
5555 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5556 ipmr_cache_put(fib_work->men_info.mfc);
5557 break;
5558 case FIB_EVENT_VIF_ADD:
5559 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5560 &fib_work->ven_info);
5561 if (err)
5562 mlxsw_sp_router_fib_abort(mlxsw_sp);
5563 dev_put(fib_work->ven_info.dev);
5564 break;
5565 case FIB_EVENT_VIF_DEL:
5566 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5567 &fib_work->ven_info);
5568 dev_put(fib_work->ven_info.dev);
5569 break;
David Ahern1f279232017-10-27 17:37:14 -07005570 case FIB_EVENT_RULE_ADD:
5571 /* If we get here, a rule was added that we do not support.
5572 * Just abort the FIB offload.
5573 */
5574 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005575 break;
5576 }
5577 rtnl_unlock();
5578 kfree(fib_work);
5579}
5580
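/* Copy the notifier info into the work item and take a reference on the
 * underlying fib_info / nexthop parent so it stays valid until the work
 * item runs.
 */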
Ido Schimmel66a57632017-08-03 13:28:26 +02005581static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5582 struct fib_notifier_info *info)
5583{
David Ahern3c75f9b2017-10-18 15:01:38 -07005584 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005585 struct fib_nh_notifier_info *fnh_info;
5586
Ido Schimmel66a57632017-08-03 13:28:26 +02005587 switch (fib_work->event) {
5588 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5589 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5590 case FIB_EVENT_ENTRY_ADD: /* fall through */
5591 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005592 fen_info = container_of(info, struct fib_entry_notifier_info,
5593 info);
5594 fib_work->fen_info = *fen_info;
5595 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005596 * freed while work is queued. Release it afterwards.
5597 */
5598 fib_info_hold(fib_work->fen_info.fi);
5599 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005600 case FIB_EVENT_NH_ADD: /* fall through */
5601 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005602 fnh_info = container_of(info, struct fib_nh_notifier_info,
5603 info);
5604 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005605 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5606 break;
5607 }
5608}
5609
5610static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5611 struct fib_notifier_info *info)
5612{
David Ahern3c75f9b2017-10-18 15:01:38 -07005613 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005614
Ido Schimmel583419f2017-08-03 13:28:27 +02005615 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005616 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005617 case FIB_EVENT_ENTRY_ADD: /* fall through */
5618 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005619 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5620 info);
5621 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005622 rt6_hold(fib_work->fen6_info.rt);
5623 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005624 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005625}
5626
Yotam Gigid42b0962017-09-27 08:23:20 +02005627static void
5628mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5629 struct fib_notifier_info *info)
5630{
5631 switch (fib_work->event) {
5632 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5633 case FIB_EVENT_ENTRY_ADD: /* fall through */
5634 case FIB_EVENT_ENTRY_DEL:
5635 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5636 ipmr_cache_hold(fib_work->men_info.mfc);
5637 break;
5638 case FIB_EVENT_VIF_ADD: /* fall through */
5639 case FIB_EVENT_VIF_DEL:
5640 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5641 dev_hold(fib_work->ven_info.dev);
5642 break;
David Ahern1f279232017-10-27 17:37:14 -07005643 }
5644}
5645
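/* FIB rules are not offloaded. Default rules and l3mdev (VRF) rules are
 * tolerated; any other rule would make the hardware tables diverge from
 * the kernel FIB, so it triggers an offload abort.
 */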
5646static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5647 struct fib_notifier_info *info,
5648 struct mlxsw_sp *mlxsw_sp)
5649{
5650 struct netlink_ext_ack *extack = info->extack;
5651 struct fib_rule_notifier_info *fr_info;
5652 struct fib_rule *rule;
5653 int err = 0;
5654
5655 /* Rule deletions require no action at the moment. */
5656 if (event == FIB_EVENT_RULE_DEL)
5657 return 0;
5658
5659 if (mlxsw_sp->router->aborted)
5660 return 0;
5661
5662 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5663 rule = fr_info->rule;
5664
5665 switch (info->family) {
5666 case AF_INET:
5667 if (!fib4_rule_default(rule) && !rule->l3mdev)
5668 err = -1;
5669 break;
5670 case AF_INET6:
5671 if (!fib6_rule_default(rule) && !rule->l3mdev)
5672 err = -1;
5673 break;
5674 case RTNL_FAMILY_IPMR:
5675 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5676 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005677 break;
5678 }
David Ahern1f279232017-10-27 17:37:14 -07005679
5680 if (err < 0)
5681 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5682
5683 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005684}
5685
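/* Top-level FIB notifier, invoked in atomic context. Rule events are
 * validated synchronously so that an extack can be reported; everything
 * that needs to touch the device is deferred to a per-family work item.
 */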
Ido Schimmel30572242016-12-03 16:45:01 +01005686/* Called with rcu_read_lock() */
5687static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5688 unsigned long event, void *ptr)
5689{
Ido Schimmel30572242016-12-03 16:45:01 +01005690 struct mlxsw_sp_fib_event_work *fib_work;
5691 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005692 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005693 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005694
Ido Schimmel8e29f972017-09-15 15:31:07 +02005695 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005696 (info->family != AF_INET && info->family != AF_INET6 &&
5697 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005698 return NOTIFY_DONE;
5699
David Ahern1f279232017-10-27 17:37:14 -07005700 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5701
5702 switch (event) {
5703 case FIB_EVENT_RULE_ADD: /* fall through */
5704 case FIB_EVENT_RULE_DEL:
5705 err = mlxsw_sp_router_fib_rule_event(event, info,
5706 router->mlxsw_sp);
5707 if (!err)
5708 return NOTIFY_DONE;
5709 }
5710
Ido Schimmel30572242016-12-03 16:45:01 +01005711 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5712 if (WARN_ON(!fib_work))
5713 return NOTIFY_BAD;
5714
Ido Schimmel7e39d112017-05-16 19:38:28 +02005715 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005716 fib_work->event = event;
5717
Ido Schimmel66a57632017-08-03 13:28:26 +02005718 switch (info->family) {
5719 case AF_INET:
5720 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5721 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005722 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005723 case AF_INET6:
5724 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5725 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005726 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005727 case RTNL_FAMILY_IPMR:
5728 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5729 mlxsw_sp_router_fibmr_event(fib_work, info);
5730 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005731 }
5732
Ido Schimmela0e47612017-02-06 16:20:10 +01005733 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005734
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005735 return NOTIFY_DONE;
5736}
5737
Ido Schimmel4724ba562017-03-10 08:53:39 +01005738static struct mlxsw_sp_rif *
5739mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5740 const struct net_device *dev)
5741{
5742 int i;
5743
5744 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005745 if (mlxsw_sp->router->rifs[i] &&
5746 mlxsw_sp->router->rifs[i]->dev == dev)
5747 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005748
5749 return NULL;
5750}
5751
5752static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5753{
5754 char ritr_pl[MLXSW_REG_RITR_LEN];
5755 int err;
5756
5757 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5758 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5759 if (WARN_ON_ONCE(err))
5760 return err;
5761
5762 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5763 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5764}
5765
5766static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005767 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005768{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005769 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5770 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5771 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005772}
5773
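/* Decide whether an address event should (de)configure a RIF: on NETDEV_UP
 * a RIF is only created if one does not already exist; on NETDEV_DOWN it is
 * only removed once the netdev has neither IPv4 nor IPv6 addresses left and
 * is not enslaved to an L3 master device.
 */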
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005774static bool
5775mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5776 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005777{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005778 struct inet6_dev *inet6_dev;
5779 bool addr_list_empty = true;
5780 struct in_device *idev;
5781
Ido Schimmel4724ba562017-03-10 08:53:39 +01005782 switch (event) {
5783 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005784 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005785 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005786 idev = __in_dev_get_rtnl(dev);
5787 if (idev && idev->ifa_list)
5788 addr_list_empty = false;
5789
5790 inet6_dev = __in6_dev_get(dev);
5791 if (addr_list_empty && inet6_dev &&
5792 !list_empty(&inet6_dev->addr_list))
5793 addr_list_empty = false;
5794
5795 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005796 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005797 return true;
5798 /* It is possible we already removed the RIF ourselves
5799 * if it was assigned to a netdev that is now a bridge
5800 * or LAG slave.
5801 */
5802 return false;
5803 }
5804
5805 return false;
5806}
5807
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005808static enum mlxsw_sp_rif_type
5809mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5810 const struct net_device *dev)
5811{
5812 enum mlxsw_sp_fid_type type;
5813
Petr Machata6ddb7422017-09-02 23:49:19 +02005814 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5815 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5816
5817 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005818 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5819 type = MLXSW_SP_FID_TYPE_8021Q;
5820 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5821 type = MLXSW_SP_FID_TYPE_8021Q;
5822 else if (netif_is_bridge_master(dev))
5823 type = MLXSW_SP_FID_TYPE_8021D;
5824 else
5825 type = MLXSW_SP_FID_TYPE_RFID;
5826
5827 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5828}
5829
Ido Schimmelde5ed992017-06-04 16:53:40 +02005830static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005831{
5832 int i;
5833
Ido Schimmelde5ed992017-06-04 16:53:40 +02005834 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5835 if (!mlxsw_sp->router->rifs[i]) {
5836 *p_rif_index = i;
5837 return 0;
5838 }
5839 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005840
Ido Schimmelde5ed992017-06-04 16:53:40 +02005841 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005842}
5843
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005844static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5845 u16 vr_id,
5846 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005847{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005848 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005849
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005850 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005851 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005852 return NULL;
5853
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005854 INIT_LIST_HEAD(&rif->nexthop_list);
5855 INIT_LIST_HEAD(&rif->neigh_list);
5856 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5857 rif->mtu = l3_dev->mtu;
5858 rif->vr_id = vr_id;
5859 rif->dev = l3_dev;
5860 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005861
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005862 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005863}
5864
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005865struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5866 u16 rif_index)
5867{
5868 return mlxsw_sp->router->rifs[rif_index];
5869}
5870
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005871u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5872{
5873 return rif->rif_index;
5874}
5875
Petr Machata92107cf2017-09-02 23:49:28 +02005876u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5877{
5878 return lb_rif->common.rif_index;
5879}
5880
5881u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5882{
5883 return lb_rif->ul_vr_id;
5884}
5885
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005886int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5887{
5888 return rif->dev->ifindex;
5889}
5890
Yotam Gigi91e4d592017-09-19 10:00:19 +02005891const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5892{
5893 return rif->dev;
5894}
5895
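/* Common RIF creation path: bind a virtual router for the netdev's FIB
 * table, allocate a RIF index and an ops-sized RIF structure, take the
 * backing FID (loopback RIFs have none), run the type-specific setup and
 * configure callbacks and register the RIF with the multicast router.
 */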
Ido Schimmel4724ba562017-03-10 08:53:39 +01005896static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005897mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005898 const struct mlxsw_sp_rif_params *params,
5899 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005900{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005901 u32 tb_id = l3mdev_fib_table(params->dev);
5902 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005903 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005904 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005905 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005906 struct mlxsw_sp_vr *vr;
5907 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005908 int err;
5909
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005910 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5911 ops = mlxsw_sp->router->rif_ops_arr[type];
5912
David Ahernf8fa9b42017-10-18 09:56:56 -07005913 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005914 if (IS_ERR(vr))
5915 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005916 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005917
Ido Schimmelde5ed992017-06-04 16:53:40 +02005918 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005919 if (err) {
5920 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005921 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005922 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005923
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005924 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005925 if (!rif) {
5926 err = -ENOMEM;
5927 goto err_rif_alloc;
5928 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005929 rif->mlxsw_sp = mlxsw_sp;
5930 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005931
Petr Machata010cadf2017-09-02 23:49:18 +02005932 if (ops->fid_get) {
5933 fid = ops->fid_get(rif);
5934 if (IS_ERR(fid)) {
5935 err = PTR_ERR(fid);
5936 goto err_fid_get;
5937 }
5938 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005939 }
5940
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005941 if (ops->setup)
5942 ops->setup(rif, params);
5943
5944 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005945 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005946 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005947
Yotam Gigid42b0962017-09-27 08:23:20 +02005948 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5949 if (err)
5950 goto err_mr_rif_add;
5951
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005952 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005953 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005954
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005955 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005956
Yotam Gigid42b0962017-09-27 08:23:20 +02005957err_mr_rif_add:
5958 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005959err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005960 if (fid)
5961 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005962err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005963 kfree(rif);
5964err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005965err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005966 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005967 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005968 return ERR_PTR(err);
5969}
5970
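/* Tear down a RIF in roughly the reverse order of creation; nexthops and
 * neighbour entries that used it are flushed first via
 * mlxsw_sp_router_rif_gone_sync().
 */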
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005971void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005972{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005973 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5974 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005975 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005976 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005977
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005978 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005979 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005980
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005981 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005982 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005983 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005984 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005985 if (fid)
5986 /* Loopback RIFs are not associated with a FID. */
5987 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005988 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005989 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005990 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005991}
5992
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005993static void
5994mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5995 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5996{
5997 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5998
5999 params->vid = mlxsw_sp_port_vlan->vid;
6000 params->lag = mlxsw_sp_port->lagged;
6001 if (params->lag)
6002 params->lag_id = mlxsw_sp_port->lag_id;
6003 else
6004 params->system_port = mlxsw_sp_port->local_port;
6005}
6006
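/* Make a {port, VID} pair routable: ensure a Sub-port RIF exists for the
 * L3 device, map the VID to the RIF's rFID, disable learning on the VID
 * and put it in the forwarding STP state.
 */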
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006007static int
Ido Schimmela1107482017-05-26 08:37:39 +02006008mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006009 struct net_device *l3_dev,
6010 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006011{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006012 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006013 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006014 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006015 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006016 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006017 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006018
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006019 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006020 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006021 struct mlxsw_sp_rif_params params = {
6022 .dev = l3_dev,
6023 };
6024
6025 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006026 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006027 if (IS_ERR(rif))
6028 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006029 }
6030
Ido Schimmela1107482017-05-26 08:37:39 +02006031 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006032 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006033 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6034 if (err)
6035 goto err_fid_port_vid_map;
6036
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006037 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006038 if (err)
6039 goto err_port_vid_learning_set;
6040
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006041 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006042 BR_STATE_FORWARDING);
6043 if (err)
6044 goto err_port_vid_stp_set;
6045
Ido Schimmela1107482017-05-26 08:37:39 +02006046 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006047
Ido Schimmel4724ba562017-03-10 08:53:39 +01006048 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006049
6050err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006051 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006052err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006053 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6054err_fid_port_vid_map:
6055 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006056 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006057}
6058
Ido Schimmela1107482017-05-26 08:37:39 +02006059void
6060mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006061{
Ido Schimmelce95e152017-05-26 08:37:27 +02006062 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006063 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006064 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006065
Ido Schimmela1107482017-05-26 08:37:39 +02006066 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6067 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006068
Ido Schimmela1107482017-05-26 08:37:39 +02006069 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006070 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6071 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006072 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6073 /* If router port holds the last reference on the rFID, then the
6074 * associated Sub-port RIF will be destroyed.
6075 */
6076 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006077}
6078
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006079static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6080 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006081 unsigned long event, u16 vid,
6082 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006083{
6084 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006085 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006086
Ido Schimmelce95e152017-05-26 08:37:27 +02006087 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006088 if (WARN_ON(!mlxsw_sp_port_vlan))
6089 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006090
6091 switch (event) {
6092 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006093 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006094 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006095 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006096 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006097 break;
6098 }
6099
6100 return 0;
6101}
6102
6103static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006104 unsigned long event,
6105 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006106{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006107 if (netif_is_bridge_port(port_dev) ||
6108 netif_is_lag_port(port_dev) ||
6109 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006110 return 0;
6111
David Ahernf8fa9b42017-10-18 09:56:56 -07006112 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6113 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006114}
6115
6116static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6117 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006118 unsigned long event, u16 vid,
6119 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006120{
6121 struct net_device *port_dev;
6122 struct list_head *iter;
6123 int err;
6124
6125 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6126 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006127 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6128 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006129 event, vid,
6130 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006131 if (err)
6132 return err;
6133 }
6134 }
6135
6136 return 0;
6137}
6138
6139static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006140 unsigned long event,
6141 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006142{
6143 if (netif_is_bridge_port(lag_dev))
6144 return 0;
6145
David Ahernf8fa9b42017-10-18 09:56:56 -07006146 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6147 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006148}
6149
Ido Schimmel4724ba562017-03-10 08:53:39 +01006150static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006151 unsigned long event,
6152 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006153{
6154 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006155 struct mlxsw_sp_rif_params params = {
6156 .dev = l3_dev,
6157 };
Ido Schimmela1107482017-05-26 08:37:39 +02006158 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006159
6160 switch (event) {
6161 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006162 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006163 if (IS_ERR(rif))
6164 return PTR_ERR(rif);
6165 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006166 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006167 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006168 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006169 break;
6170 }
6171
6172 return 0;
6173}
6174
6175static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006176 unsigned long event,
6177 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006178{
6179 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006180 u16 vid = vlan_dev_vlan_id(vlan_dev);
6181
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006182 if (netif_is_bridge_port(vlan_dev))
6183 return 0;
6184
Ido Schimmel4724ba562017-03-10 08:53:39 +01006185 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006186 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006187 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006188 else if (netif_is_lag_master(real_dev))
6189 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006190 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006191 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006192 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006193
6194 return 0;
6195}
6196
Ido Schimmelb1e45522017-04-30 19:47:14 +03006197static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006198 unsigned long event,
6199 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006200{
6201 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006202 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006203 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006204 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006205 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006206 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006207 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006208 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006209 else
6210 return 0;
6211}
6212
Ido Schimmel4724ba562017-03-10 08:53:39 +01006213int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6214 unsigned long event, void *ptr)
6215{
6216 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6217 struct net_device *dev = ifa->ifa_dev->dev;
6218 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006219 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006220 int err = 0;
6221
David Ahern89d5dd22017-10-18 09:56:55 -07006222 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6223 if (event == NETDEV_UP)
6224 goto out;
6225
6226 mlxsw_sp = mlxsw_sp_lower_get(dev);
6227 if (!mlxsw_sp)
6228 goto out;
6229
6230 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6231 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6232 goto out;
6233
David Ahernf8fa9b42017-10-18 09:56:56 -07006234 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006235out:
6236 return notifier_from_errno(err);
6237}
6238
6239int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6240 unsigned long event, void *ptr)
6241{
6242 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6243 struct net_device *dev = ivi->ivi_dev->dev;
6244 struct mlxsw_sp *mlxsw_sp;
6245 struct mlxsw_sp_rif *rif;
6246 int err = 0;
6247
Ido Schimmel4724ba562017-03-10 08:53:39 +01006248 mlxsw_sp = mlxsw_sp_lower_get(dev);
6249 if (!mlxsw_sp)
6250 goto out;
6251
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006252 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006253 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006254 goto out;
6255
David Ahernf8fa9b42017-10-18 09:56:56 -07006256 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006257out:
6258 return notifier_from_errno(err);
6259}
6260
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006261struct mlxsw_sp_inet6addr_event_work {
6262 struct work_struct work;
6263 struct net_device *dev;
6264 unsigned long event;
6265};
6266
6267static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6268{
6269 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6270 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6271 struct net_device *dev = inet6addr_work->dev;
6272 unsigned long event = inet6addr_work->event;
6273 struct mlxsw_sp *mlxsw_sp;
6274 struct mlxsw_sp_rif *rif;
6275
6276 rtnl_lock();
6277 mlxsw_sp = mlxsw_sp_lower_get(dev);
6278 if (!mlxsw_sp)
6279 goto out;
6280
6281 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6282 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6283 goto out;
6284
David Ahernf8fa9b42017-10-18 09:56:56 -07006285 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006286out:
6287 rtnl_unlock();
6288 dev_put(dev);
6289 kfree(inet6addr_work);
6290}
6291
6292/* Called with rcu_read_lock() */
6293int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6294 unsigned long event, void *ptr)
6295{
6296 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6297 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6298 struct net_device *dev = if6->idev->dev;
6299
David Ahern89d5dd22017-10-18 09:56:55 -07006300 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6301 if (event == NETDEV_UP)
6302 return NOTIFY_DONE;
6303
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006304 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6305 return NOTIFY_DONE;
6306
6307 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6308 if (!inet6addr_work)
6309 return NOTIFY_BAD;
6310
6311 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6312 inet6addr_work->dev = dev;
6313 inet6addr_work->event = event;
6314 dev_hold(dev);
6315 mlxsw_core_schedule_work(&inet6addr_work->work);
6316
6317 return NOTIFY_DONE;
6318}
6319
David Ahern89d5dd22017-10-18 09:56:55 -07006320int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6321 unsigned long event, void *ptr)
6322{
6323 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6324 struct net_device *dev = i6vi->i6vi_dev->dev;
6325 struct mlxsw_sp *mlxsw_sp;
6326 struct mlxsw_sp_rif *rif;
6327 int err = 0;
6328
6329 mlxsw_sp = mlxsw_sp_lower_get(dev);
6330 if (!mlxsw_sp)
6331 goto out;
6332
6333 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6334 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6335 goto out;
6336
David Ahernf8fa9b42017-10-18 09:56:56 -07006337 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006338out:
6339 return notifier_from_errno(err);
6340}
6341
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006342static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006343 const char *mac, int mtu)
6344{
6345 char ritr_pl[MLXSW_REG_RITR_LEN];
6346 int err;
6347
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006348 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006349 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6350 if (err)
6351 return err;
6352
6353 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6354 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6355 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6356 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6357}
6358
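/* Handle a MAC or MTU change on a netdev backing a RIF: withdraw the old
 * router FDB entry, rewrite the RITR register with the new parameters,
 * install a new FDB entry and, if the MTU changed, update the multicast
 * router table as well.
 */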
6359int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6360{
6361 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006362 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006363 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006364 int err;
6365
6366 mlxsw_sp = mlxsw_sp_lower_get(dev);
6367 if (!mlxsw_sp)
6368 return 0;
6369
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006370 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6371 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006372 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006373 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006374
Ido Schimmela1107482017-05-26 08:37:39 +02006375 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006376 if (err)
6377 return err;
6378
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006379 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6380 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006381 if (err)
6382 goto err_rif_edit;
6383
Ido Schimmela1107482017-05-26 08:37:39 +02006384 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006385 if (err)
6386 goto err_rif_fdb_op;
6387
Yotam Gigifd890fe2017-09-27 08:23:21 +02006388 if (rif->mtu != dev->mtu) {
6389 struct mlxsw_sp_vr *vr;
6390
6391 /* The RIF is relevant only to its mr_table instance, as unlike
6392 * unicast routing, in multicast routing a RIF cannot be shared
6393 * between several multicast routing tables.
6394 */
6395 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6396 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6397 }
6398
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006399 ether_addr_copy(rif->addr, dev->dev_addr);
6400 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006401
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006402 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006403
6404 return 0;
6405
6406err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006407 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006408err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006409 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006410 return err;
6411}
6412
Ido Schimmelb1e45522017-04-30 19:47:14 +03006413static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006414 struct net_device *l3_dev,
6415 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006416{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006417 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006418
Ido Schimmelb1e45522017-04-30 19:47:14 +03006419 /* If netdev is already associated with a RIF, then we need to
6420 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006421 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006422 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6423 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006424 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006425
David Ahernf8fa9b42017-10-18 09:56:56 -07006426 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006427}
6428
Ido Schimmelb1e45522017-04-30 19:47:14 +03006429static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6430 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006431{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006432 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006433
Ido Schimmelb1e45522017-04-30 19:47:14 +03006434 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6435 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006436 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006437 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006438}
6439
Ido Schimmelb1e45522017-04-30 19:47:14 +03006440int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6441 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006442{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006443 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6444 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006445
Ido Schimmelb1e45522017-04-30 19:47:14 +03006446 if (!mlxsw_sp)
6447 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006448
Ido Schimmelb1e45522017-04-30 19:47:14 +03006449 switch (event) {
6450 case NETDEV_PRECHANGEUPPER:
6451 return 0;
6452 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006453 if (info->linking) {
6454 struct netlink_ext_ack *extack;
6455
6456 extack = netdev_notifier_info_to_extack(&info->info);
6457 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6458 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006459 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006460 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006461 break;
6462 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006463
Ido Schimmelb1e45522017-04-30 19:47:14 +03006464 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006465}
6466
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006467static struct mlxsw_sp_rif_subport *
6468mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006469{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006470 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006471}
6472
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006473static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6474 const struct mlxsw_sp_rif_params *params)
6475{
6476 struct mlxsw_sp_rif_subport *rif_subport;
6477
6478 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6479 rif_subport->vid = params->vid;
6480 rif_subport->lag = params->lag;
6481 if (params->lag)
6482 rif_subport->lag_id = params->lag_id;
6483 else
6484 rif_subport->system_port = params->system_port;
6485}
6486
6487static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6488{
6489 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6490 struct mlxsw_sp_rif_subport *rif_subport;
6491 char ritr_pl[MLXSW_REG_RITR_LEN];
6492
6493 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6494 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006495 rif->rif_index, rif->vr_id, rif->dev->mtu);
6496 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006497 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6498 rif_subport->lag ? rif_subport->lag_id :
6499 rif_subport->system_port,
6500 rif_subport->vid);
6501
6502 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6503}
6504
6505static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6506{
Petr Machata010cadf2017-09-02 23:49:18 +02006507 int err;
6508
6509 err = mlxsw_sp_rif_subport_op(rif, true);
6510 if (err)
6511 return err;
6512
6513 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6514 mlxsw_sp_fid_index(rif->fid), true);
6515 if (err)
6516 goto err_rif_fdb_op;
6517
6518 mlxsw_sp_fid_rif_set(rif->fid, rif);
6519 return 0;
6520
6521err_rif_fdb_op:
6522 mlxsw_sp_rif_subport_op(rif, false);
6523 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006524}
6525
6526static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6527{
Petr Machata010cadf2017-09-02 23:49:18 +02006528 struct mlxsw_sp_fid *fid = rif->fid;
6529
6530 mlxsw_sp_fid_rif_set(fid, NULL);
6531 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6532 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006533 mlxsw_sp_rif_subport_op(rif, false);
6534}
6535
6536static struct mlxsw_sp_fid *
6537mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6538{
6539 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6540}
6541
6542static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6543 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6544 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6545 .setup = mlxsw_sp_rif_subport_setup,
6546 .configure = mlxsw_sp_rif_subport_configure,
6547 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6548 .fid_get = mlxsw_sp_rif_subport_fid_get,
6549};
6550
6551static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6552 enum mlxsw_reg_ritr_if_type type,
6553 u16 vid_fid, bool enable)
6554{
6555 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6556 char ritr_pl[MLXSW_REG_RITR_LEN];
6557
6558 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006559 rif->dev->mtu);
6560 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006561 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6562
6563 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6564}
6565
Yotam Gigib35750f2017-10-09 11:15:33 +02006566u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006567{
6568 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6569}
6570
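/* VLAN and FID RIFs share the same configure pattern: program the RITR
 * interface, add the internal router port to the FID's MC and BC flood
 * tables so that flooded traffic also reaches the router, and install an
 * FDB entry for the RIF's MAC address.
 */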
6571static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6572{
6573 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6574 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6575 int err;
6576
6577 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6578 if (err)
6579 return err;
6580
Ido Schimmel0d284812017-07-18 10:10:12 +02006581 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6582 mlxsw_sp_router_port(mlxsw_sp), true);
6583 if (err)
6584 goto err_fid_mc_flood_set;
6585
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006586 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6587 mlxsw_sp_router_port(mlxsw_sp), true);
6588 if (err)
6589 goto err_fid_bc_flood_set;
6590
Petr Machata010cadf2017-09-02 23:49:18 +02006591 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6592 mlxsw_sp_fid_index(rif->fid), true);
6593 if (err)
6594 goto err_rif_fdb_op;
6595
6596 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006597 return 0;
6598
Petr Machata010cadf2017-09-02 23:49:18 +02006599err_rif_fdb_op:
6600 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6601 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006602err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006603 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6604 mlxsw_sp_router_port(mlxsw_sp), false);
6605err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006606 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6607 return err;
6608}
6609
6610static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6611{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006612 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006613 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6614 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006615
Petr Machata010cadf2017-09-02 23:49:18 +02006616 mlxsw_sp_fid_rif_set(fid, NULL);
6617 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6618 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006619 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6620 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006621 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6622 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006623 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6624}
6625
6626static struct mlxsw_sp_fid *
6627mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6628{
6629 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6630
6631 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6632}
6633
6634static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6635 .type = MLXSW_SP_RIF_TYPE_VLAN,
6636 .rif_size = sizeof(struct mlxsw_sp_rif),
6637 .configure = mlxsw_sp_rif_vlan_configure,
6638 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6639 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6640};
6641
6642static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6643{
6644 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6645 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6646 int err;
6647
6648 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6649 true);
6650 if (err)
6651 return err;
6652
Ido Schimmel0d284812017-07-18 10:10:12 +02006653 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6654 mlxsw_sp_router_port(mlxsw_sp), true);
6655 if (err)
6656 goto err_fid_mc_flood_set;
6657
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006658 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6659 mlxsw_sp_router_port(mlxsw_sp), true);
6660 if (err)
6661 goto err_fid_bc_flood_set;
6662
Petr Machata010cadf2017-09-02 23:49:18 +02006663 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6664 mlxsw_sp_fid_index(rif->fid), true);
6665 if (err)
6666 goto err_rif_fdb_op;
6667
6668 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006669 return 0;
6670
Petr Machata010cadf2017-09-02 23:49:18 +02006671err_rif_fdb_op:
6672 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6673 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006674err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006675 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6676 mlxsw_sp_router_port(mlxsw_sp), false);
6677err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006678 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6679 return err;
6680}
6681
6682static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6683{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006684 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006685 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6686 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006687
Petr Machata010cadf2017-09-02 23:49:18 +02006688 mlxsw_sp_fid_rif_set(fid, NULL);
6689 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6690 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006691 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6692 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006693 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6694 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006695 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6696}
6697
6698static struct mlxsw_sp_fid *
6699mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6700{
6701 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6702}
6703
6704static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6705 .type = MLXSW_SP_RIF_TYPE_FID,
6706 .rif_size = sizeof(struct mlxsw_sp_rif),
6707 .configure = mlxsw_sp_rif_fid_configure,
6708 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6709 .fid_get = mlxsw_sp_rif_fid_fid_get,
6710};
6711
Petr Machata6ddb7422017-09-02 23:49:19 +02006712static struct mlxsw_sp_rif_ipip_lb *
6713mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6714{
6715 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6716}
6717
6718static void
6719mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6720 const struct mlxsw_sp_rif_params *params)
6721{
6722 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6723 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6724
6725 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6726 common);
6727 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6728 rif_lb->lb_config = params_lb->lb_config;
6729}
6730
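/* Program the loopback RITR used by IP-in-IP tunnels. Only an IPv4
 * underlay is supported here; an IPv6 underlay returns -EAFNOSUPPORT.
 */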
6731static int
6732mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6733 struct mlxsw_sp_vr *ul_vr, bool enable)
6734{
6735 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6736 struct mlxsw_sp_rif *rif = &lb_rif->common;
6737 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6738 char ritr_pl[MLXSW_REG_RITR_LEN];
6739 u32 saddr4;
6740
6741 switch (lb_cf.ul_protocol) {
6742 case MLXSW_SP_L3_PROTO_IPV4:
6743 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6744 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6745 rif->rif_index, rif->vr_id, rif->dev->mtu);
6746 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6747 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6748 ul_vr->id, saddr4, lb_cf.okey);
6749 break;
6750
6751 case MLXSW_SP_L3_PROTO_IPV6:
6752 return -EAFNOSUPPORT;
6753 }
6754
6755 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6756}
6757
6758static int
6759mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6760{
6761 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6762 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6763 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6764 struct mlxsw_sp_vr *ul_vr;
6765 int err;
6766
David Ahernf8fa9b42017-10-18 09:56:56 -07006767 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006768 if (IS_ERR(ul_vr))
6769 return PTR_ERR(ul_vr);
6770
6771 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6772 if (err)
6773 goto err_loopback_op;
6774
6775 lb_rif->ul_vr_id = ul_vr->id;
6776 ++ul_vr->rif_count;
6777 return 0;
6778
6779err_loopback_op:
6780 mlxsw_sp_vr_put(ul_vr);
6781 return err;
6782}
6783
6784static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6785{
6786 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6787 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6788 struct mlxsw_sp_vr *ul_vr;
6789
6790 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6791 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6792
6793 --ul_vr->rif_count;
6794 mlxsw_sp_vr_put(ul_vr);
6795}
6796
6797static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6798 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6799 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6800 .setup = mlxsw_sp_rif_ipip_lb_setup,
6801 .configure = mlxsw_sp_rif_ipip_lb_configure,
6802 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6803};
6804
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006805static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6806 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6807 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6808 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006809 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006810};
6811
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

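/* Write the Tunneling IPinIP General Configuration Register (TIGCR), which
 * holds the device-global (as opposed to per-tunnel) IP-in-IP settings.
 */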
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

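/* Initialize IP-in-IP offload: hook up the per-tunnel-type ops table, start
 * with an empty list of offloaded tunnels and write the global tunneling
 * configuration.
 */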
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

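/* ECMP hash configuration (RECR2 register): select which packet header fields
 * feed the hardware multipath hash. Compiled out when the kernel is built
 * without IPv4 multipath routing support.
 */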
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

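/* IPv4: always hash on source and destination IP. When the kernel's
 * fib_multipath_hash_policy sysctl asks for an L4 hash, also include the IP
 * protocol and the TCP/UDP ports.
 */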
static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

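/* IPv6: hash on source and destination IP, the flow label and the next
 * header field.
 */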
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}

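/* Seed the ECMP hash with a random value and program the IPv4 and IPv6 field
 * selections into RECR2.
 */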
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

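/* Enable IPv4 and IPv6 routing via the RGCR register and cap the number of
 * router interfaces at the MAX_RIFS resource reported by the device.
 */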
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

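/* Bring up the router: allocate the router state and initialize its
 * sub-blocks (global router config, RIFs, IP-in-IP, nexthop hash tables,
 * LPM trees, multicast routing, virtual routers, neighbour tracking),
 * register the netevent notifier, configure the ECMP hash, and finally
 * register the FIB notifier. On failure, the error path unwinds in reverse
 * order of initialization.
 */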
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}