/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

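/* RIF counter helpers. Each RIF can have an ingress and an egress counter
 * allocated from the RIF counter sub-pool; the helpers below map a counter
 * direction to the corresponding index and validity fields of the RIF.
 */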
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

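/* Allocate a counter for the given direction: reserve an index from the RIF
 * counter pool, clear it via RICNT and bind it to the RIF via RITR. On any
 * failure the counter index is released again.
 */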
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

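/* LPM trees. The device has a limited number of LPM trees (tree 0 is
 * reserved); each tree describes the set of prefix lengths used by the
 * FIBs bound to it and is reference counted.
 */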
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

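/* Return an existing tree that already holds the requested protocol and
 * prefix usage, or create a new one if none matches. Callers pair this with
 * mlxsw_sp_lpm_tree_hold() and mlxsw_sp_lpm_tree_put() to manage the
 * reference count.
 */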
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

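/* Virtual routers. A VR is considered in use as long as any of its FIB
 * tables (IPv4, IPv6) or its IPv4 multicast table exists.
 */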
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

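/* LPM tree replacement: when a FIB needs a tree with a different structure,
 * every virtual router bound to the old tree is rebound to the new one, and
 * the bindings are rolled back if any rebind fails.
 */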
static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

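/* IPIP tunnel offload. The overlay device (ol_dev) is the tunnel netdevice;
 * the underlay device, if any, is the netdevice the tunnel is bound to, and
 * it determines the underlay FIB table used for decapsulation.
 */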
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

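/* Bind a FIB entry to an IPIP entry as its decap route: allocate a tunnel
 * index from KVDL and cross-link the two structures.
 */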
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* The configuration where several tunnels have the same local address
	 * in the same underlay table needs special treatment in the HW. That is
	 * currently not implemented in the driver.
	 */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node) {
		ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry))
			return ERR_PTR(-EEXIST);
	}

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

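/* Match a netdevice against the supported IPIP types by comparing its device
 * type with each registered set of IPIP operations.
 */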
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_ipip_type ipipt;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
							ol_dev);
		if (IS_ERR(ipip_entry))
			return PTR_ERR(ipip_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

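/* When the overlay device comes up, look up the route to the tunnel's local
 * address and promote it to a decap entry; when it goes down, demote the
 * decap entry back to a trap.
 */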
Petr Machata47518ca2017-11-03 10:03:35 +01001318static void
1319mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1320 struct mlxsw_sp_ipip_entry *ipip_entry)
1321{
1322 struct mlxsw_sp_fib_entry *decap_fib_entry;
1323
1324 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1325 if (decap_fib_entry)
1326 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1327 decap_fib_entry);
1328}
1329
Petr Machata6d4de442017-11-03 10:03:34 +01001330static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1331 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001332{
Petr Machata00635872017-10-16 16:26:37 +02001333 struct mlxsw_sp_ipip_entry *ipip_entry;
1334
1335 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001336 if (ipip_entry)
1337 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001338}
1339
Petr Machataa3fe1982017-11-03 10:03:33 +01001340static void
1341mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1342 struct mlxsw_sp_ipip_entry *ipip_entry)
1343{
1344 if (ipip_entry->decap_fib_entry)
1345 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1346}
1347
Petr Machata796ec772017-11-03 10:03:29 +01001348static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1349 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001350{
1351 struct mlxsw_sp_ipip_entry *ipip_entry;
1352
1353 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001354 if (ipip_entry)
1355 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001356}
1357
Petr Machata65a61212017-11-03 10:03:37 +01001358static int
1359mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1360 struct mlxsw_sp_ipip_entry *ipip_entry,
1361 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001362{
Petr Machata65a61212017-11-03 10:03:37 +01001363 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1364 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001365
Petr Machata65a61212017-11-03 10:03:37 +01001366 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1367 ipip_entry->ipipt,
1368 ipip_entry->ol_dev,
1369 extack);
1370 if (IS_ERR(new_lb_rif))
1371 return PTR_ERR(new_lb_rif);
1372 ipip_entry->ol_lb = new_lb_rif;
1373 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001374
Petr Machata65a61212017-11-03 10:03:37 +01001375 return 0;
1376}
1377
1378int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1379 struct mlxsw_sp_ipip_entry *ipip_entry,
1380 struct netlink_ext_ack *extack)
1381{
1382 int err;
1383
1384 /* RIFs can't be edited, so to update loopback, we need to destroy and
1385 * recreate it. That creates a window of opportunity where RALUE and
1386 * RATR registers end up referencing a RIF that's already gone. RATRs
1387 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001388 * of RALUE, demote the decap route back.
1389 */
1390 if (ipip_entry->decap_fib_entry)
1391 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1392
Petr Machata65a61212017-11-03 10:03:37 +01001393 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry, extack);
1394 if (err)
1395 return err;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001396
Petr Machata65a61212017-11-03 10:03:37 +01001397 if (ipip_entry->ol_dev->flags & IFF_UP)
1398 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001399
1400 return 0;
1401}
1402
Petr Machata65a61212017-11-03 10:03:37 +01001403static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1404 struct net_device *ol_dev,
1405 struct netlink_ext_ack *extack)
1406{
1407 struct mlxsw_sp_ipip_entry *ipip_entry =
1408 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1409
1410 if (!ipip_entry)
1411 return 0;
1412 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1413 extack);
1414}
1415
Petr Machata7e75af62017-11-03 10:03:36 +01001416int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1417 struct net_device *ol_dev,
1418 unsigned long event,
1419 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001420{
Petr Machata7e75af62017-11-03 10:03:36 +01001421 struct netdev_notifier_changeupper_info *chup;
1422 struct netlink_ext_ack *extack;
1423
Petr Machata00635872017-10-16 16:26:37 +02001424 switch (event) {
1425 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001426 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001427 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001428 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001429 return 0;
1430 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001431 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1432 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001433 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001434 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001435 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001436 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001437 chup = container_of(info, typeof(*chup), info);
1438 extack = info->extack;
1439 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001440 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001441 ol_dev,
1442 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001443 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001444 }
1445 return 0;
1446}
1447
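/* A mlxsw_sp_neigh_entry mirrors one kernel neighbour (ARP/ND) into the
 * device: it caches the neighbour's MAC, records the RIF it was learned
 * on, keeps a list of the nexthops that resolve through it and optionally
 * carries a dpipe flow counter.
 */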
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001448struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001449 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001450};
1451
1452struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001453 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001454 struct rhash_head ht_node;
1455 struct mlxsw_sp_neigh_key key;
1456 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001457 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001458 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001459 struct list_head nexthop_list; /* list of nexthops using
1460 * this neigh entry
1461 */
Yotam Gigib2157142016-07-05 11:27:51 +02001462 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001463 unsigned int counter_index;
1464 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001465};
1466
1467static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1468 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1469 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1470 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1471};
1472
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001473struct mlxsw_sp_neigh_entry *
1474mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1475 struct mlxsw_sp_neigh_entry *neigh_entry)
1476{
1477 if (!neigh_entry) {
1478 if (list_empty(&rif->neigh_list))
1479 return NULL;
1480 else
1481 return list_first_entry(&rif->neigh_list,
1482 typeof(*neigh_entry),
1483 rif_list_node);
1484 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001485 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001486 return NULL;
1487 return list_next_entry(neigh_entry, rif_list_node);
1488}
1489
1490int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1491{
1492 return neigh_entry->key.n->tbl->family;
1493}
1494
1495unsigned char *
1496mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1497{
1498 return neigh_entry->ha;
1499}
1500
1501u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1502{
1503 struct neighbour *n;
1504
1505 n = neigh_entry->key.n;
1506 return ntohl(*((__be32 *) n->primary_key));
1507}
1508
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001509struct in6_addr *
1510mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1511{
1512 struct neighbour *n;
1513
1514 n = neigh_entry->key.n;
1515 return (struct in6_addr *) &n->primary_key;
1516}
1517
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001518int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1519 struct mlxsw_sp_neigh_entry *neigh_entry,
1520 u64 *p_counter)
1521{
1522 if (!neigh_entry->counter_valid)
1523 return -EINVAL;
1524
1525 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1526 p_counter, NULL);
1527}
1528
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001529static struct mlxsw_sp_neigh_entry *
1530mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1531 u16 rif)
1532{
1533 struct mlxsw_sp_neigh_entry *neigh_entry;
1534
1535 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1536 if (!neigh_entry)
1537 return NULL;
1538
1539 neigh_entry->key.n = n;
1540 neigh_entry->rif = rif;
1541 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1542
1543 return neigh_entry;
1544}
1545
1546static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1547{
1548 kfree(neigh_entry);
1549}
1550
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001551static int
1552mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1553 struct mlxsw_sp_neigh_entry *neigh_entry)
1554{
Ido Schimmel9011b672017-05-16 19:38:25 +02001555 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001556 &neigh_entry->ht_node,
1557 mlxsw_sp_neigh_ht_params);
1558}
1559
1560static void
1561mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1562 struct mlxsw_sp_neigh_entry *neigh_entry)
1563{
Ido Schimmel9011b672017-05-16 19:38:25 +02001564 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001565 &neigh_entry->ht_node,
1566 mlxsw_sp_neigh_ht_params);
1567}
1568
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001569static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001570mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1571 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001572{
1573 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001574 const char *table_name;
1575
1576 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1577 case AF_INET:
1578 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1579 break;
1580 case AF_INET6:
1581 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1582 break;
1583 default:
1584 WARN_ON(1);
1585 return false;
1586 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001587
1588 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001589 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001590}
1591
1592static void
1593mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1594 struct mlxsw_sp_neigh_entry *neigh_entry)
1595{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001596 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001597 return;
1598
1599 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1600 return;
1601
1602 neigh_entry->counter_valid = true;
1603}
1604
1605static void
1606mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1607 struct mlxsw_sp_neigh_entry *neigh_entry)
1608{
1609 if (!neigh_entry->counter_valid)
1610 return;
1611 mlxsw_sp_flow_counter_free(mlxsw_sp,
1612 neigh_entry->counter_index);
1613 neigh_entry->counter_valid = false;
1614}
1615
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001616static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001617mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001618{
1619 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001620 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001621 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001622
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001623 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1624 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001625 return ERR_PTR(-EINVAL);
1626
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001627 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001628 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001629 return ERR_PTR(-ENOMEM);
1630
1631 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1632 if (err)
1633 goto err_neigh_entry_insert;
1634
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001635 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001636 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001637
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001638 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001639
1640err_neigh_entry_insert:
1641 mlxsw_sp_neigh_entry_free(neigh_entry);
1642 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001643}
1644
1645static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001646mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1647 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001648{
Ido Schimmel9665b742017-02-08 11:16:42 +01001649 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001650 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001651 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1652 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001653}
1654
1655static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001656mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001657{
Jiri Pirko33b13412016-11-10 12:31:04 +01001658 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001659
Jiri Pirko33b13412016-11-10 12:31:04 +01001660 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001661 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001662 &key, mlxsw_sp_neigh_ht_params);
1663}
1664
Yotam Gigic723c7352016-07-05 11:27:43 +02001665static void
1666mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1667{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001668 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001669
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001670#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001671 interval = min_t(unsigned long,
1672 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1673 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001674#else
1675 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1676#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001677 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001678}
1679
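/* The RAUHTD dump handled below reports neighbour entries that the device
 * considers active. For each reported entry the matching kernel neighbour
 * is looked up and neigh_event_send() is called on it, so the kernel keeps
 * neighbours reachable even when their traffic is forwarded entirely in
 * HW.
 */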
1680static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1681 char *rauhtd_pl,
1682 int ent_index)
1683{
1684 struct net_device *dev;
1685 struct neighbour *n;
1686 __be32 dipn;
1687 u32 dip;
1688 u16 rif;
1689
1690 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1691
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001692 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001693 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1694 return;
1695 }
1696
1697 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001698 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001699 n = neigh_lookup(&arp_tbl, &dipn, dev);
1700 if (!n) {
1701 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1702 &dip);
1703 return;
1704 }
1705
1706 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1707 neigh_event_send(n, NULL);
1708 neigh_release(n);
1709}
1710
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001711#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001712static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1713 char *rauhtd_pl,
1714 int rec_index)
1715{
1716 struct net_device *dev;
1717 struct neighbour *n;
1718 struct in6_addr dip;
1719 u16 rif;
1720
1721 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1722 (char *) &dip);
1723
1724 if (!mlxsw_sp->router->rifs[rif]) {
1725 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1726 return;
1727 }
1728
1729 dev = mlxsw_sp->router->rifs[rif]->dev;
1730 n = neigh_lookup(&nd_tbl, &dip, dev);
1731 if (!n) {
1732 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1733 &dip);
1734 return;
1735 }
1736
1737 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1738 neigh_event_send(n, NULL);
1739 neigh_release(n);
1740}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001741#else
1742static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1743 char *rauhtd_pl,
1744 int rec_index)
1745{
1746}
1747#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001748
Yotam Gigic723c7352016-07-05 11:27:43 +02001749static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1750 char *rauhtd_pl,
1751 int rec_index)
1752{
1753 u8 num_entries;
1754 int i;
1755
1756 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1757 rec_index);
1758 /* Hardware starts counting at 0, so add 1. */
1759 num_entries++;
1760
1761 /* Each record consists of several neighbour entries. */
1762 for (i = 0; i < num_entries; i++) {
1763 int ent_index;
1764
1765 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1766 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1767 ent_index);
1768 }
1769
1770}
1771
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001772static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1773 char *rauhtd_pl,
1774 int rec_index)
1775{
1776 /* One record contains one entry. */
1777 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1778 rec_index);
1779}
1780
Yotam Gigic723c7352016-07-05 11:27:43 +02001781static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1782 char *rauhtd_pl, int rec_index)
1783{
1784 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1785 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1786 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1787 rec_index);
1788 break;
1789 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001790 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1791 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001792 break;
1793 }
1794}
1795
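/* Heuristic for deciding whether another RAUHTD query is needed: the dump
 * is considered full when the maximum number of records was returned and
 * the last record itself is full - an IPv6 record always is, an IPv4
 * record only when it carries the maximum number of entries.
 */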
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001796static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1797{
1798 u8 num_rec, last_rec_index, num_entries;
1799
1800 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1801 last_rec_index = num_rec - 1;
1802
1803 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1804 return false;
1805 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1806 MLXSW_REG_RAUHTD_TYPE_IPV6)
1807 return true;
1808
1809 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1810 last_rec_index);
1811 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1812 return true;
1813 return false;
1814}
1815
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001816static int
1817__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1818 char *rauhtd_pl,
1819 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001820{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001821 int i, num_rec;
1822 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001823
1824 /* Make sure the neighbour's netdev isn't removed in the
1825 * process.
1826 */
1827 rtnl_lock();
1828 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001829 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001830 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1831 rauhtd_pl);
1832 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001833 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001834 break;
1835 }
1836 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1837 for (i = 0; i < num_rec; i++)
1838 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1839 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001840 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001841 rtnl_unlock();
1842
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001843 return err;
1844}
1845
1846static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1847{
1848 enum mlxsw_reg_rauhtd_type type;
1849 char *rauhtd_pl;
1850 int err;
1851
1852 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1853 if (!rauhtd_pl)
1854 return -ENOMEM;
1855
1856 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1857 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1858 if (err)
1859 goto out;
1860
1861 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1862 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1863out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001864 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001865 return err;
1866}
1867
1868static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1869{
1870 struct mlxsw_sp_neigh_entry *neigh_entry;
1871
1872	/* Take the RTNL mutex here to prevent the lists from changing */
1873 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001874 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001875 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001876	/* If this neigh has nexthops, make the kernel think this neigh
1877 * is active regardless of the traffic.
1878 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001879 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001880 rtnl_unlock();
1881}
1882
1883static void
1884mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1885{
Ido Schimmel9011b672017-05-16 19:38:25 +02001886 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001887
Ido Schimmel9011b672017-05-16 19:38:25 +02001888 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001889 msecs_to_jiffies(interval));
1890}
1891
1892static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1893{
Ido Schimmel9011b672017-05-16 19:38:25 +02001894 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001895 int err;
1896
Ido Schimmel9011b672017-05-16 19:38:25 +02001897 router = container_of(work, struct mlxsw_sp_router,
1898 neighs_update.dw.work);
1899 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001900 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001901		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
Yotam Gigib2157142016-07-05 11:27:51 +02001902
Ido Schimmel9011b672017-05-16 19:38:25 +02001903 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001904
Ido Schimmel9011b672017-05-16 19:38:25 +02001905 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001906}
1907
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001908static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1909{
1910 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001911 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001912
Ido Schimmel9011b672017-05-16 19:38:25 +02001913 router = container_of(work, struct mlxsw_sp_router,
1914 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001915	/* Iterate over the nexthop neighbours, find those that are unresolved
1916	 * and send ARP on them. This solves the chicken-and-egg problem where
1917	 * a nexthop would not get offloaded until its neighbour is resolved,
1918	 * but the neighbour would never get resolved as long as traffic keeps
1919	 * flowing in HW via a different nexthop.
1920	 *
1921	 * Take the RTNL mutex here to prevent the lists from changing.
1922 */
1923 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001924 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001925 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001926 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001927 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001928 rtnl_unlock();
1929
Ido Schimmel9011b672017-05-16 19:38:25 +02001930 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001931 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1932}
1933
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001934static void
1935mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1936 struct mlxsw_sp_neigh_entry *neigh_entry,
1937 bool removing);
1938
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001939static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001940{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001941 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1942 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1943}
1944
1945static void
1946mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1947 struct mlxsw_sp_neigh_entry *neigh_entry,
1948 enum mlxsw_reg_rauht_op op)
1949{
Jiri Pirko33b13412016-11-10 12:31:04 +01001950 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001951 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001952 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001953
1954 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1955 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001956 if (neigh_entry->counter_valid)
1957 mlxsw_reg_rauht_pack_counter(rauht_pl,
1958 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001959 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1960}
1961
1962static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001963mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1964 struct mlxsw_sp_neigh_entry *neigh_entry,
1965 enum mlxsw_reg_rauht_op op)
1966{
1967 struct neighbour *n = neigh_entry->key.n;
1968 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1969 const char *dip = n->primary_key;
1970
1971 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1972 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001973 if (neigh_entry->counter_valid)
1974 mlxsw_reg_rauht_pack_counter(rauht_pl,
1975 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001976 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1977}
1978
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001979bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001980{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001981 struct neighbour *n = neigh_entry->key.n;
1982
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001983 /* Packets with a link-local destination address are trapped
1984 * after LPM lookup and never reach the neighbour table, so
1985 * there is no need to program such neighbours to the device.
1986 */
1987 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1988 IPV6_ADDR_LINKLOCAL)
1989 return true;
1990 return false;
1991}
1992
1993static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001994mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1995 struct mlxsw_sp_neigh_entry *neigh_entry,
1996 bool adding)
1997{
1998 if (!adding && !neigh_entry->connected)
1999 return;
2000 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002001 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002002 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2003 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002004 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002005 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002006 return;
2007 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2008 mlxsw_sp_rauht_op(adding));
2009 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002010 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002011 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002012}
2013
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002014void
2015mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2016 struct mlxsw_sp_neigh_entry *neigh_entry,
2017 bool adding)
2018{
2019 if (adding)
2020 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2021 else
2022 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2023 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2024}
2025
Ido Schimmelceb88812017-11-02 17:14:07 +01002026struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002027 struct work_struct work;
2028 struct mlxsw_sp *mlxsw_sp;
2029 struct neighbour *n;
2030};
2031
2032static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2033{
Ido Schimmelceb88812017-11-02 17:14:07 +01002034 struct mlxsw_sp_netevent_work *net_work =
2035 container_of(work, struct mlxsw_sp_netevent_work, work);
2036 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002037 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002038 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002039 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002040 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002041 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002042
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002043 /* If these parameters are changed after we release the lock,
2044 * then we are guaranteed to receive another event letting us
2045 * know about it.
2046 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002047 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002048 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002049 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002050 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002051 read_unlock_bh(&n->lock);
2052
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002053 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002054 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002055 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2056 if (!entry_connected && !neigh_entry)
2057 goto out;
2058 if (!neigh_entry) {
2059 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2060 if (IS_ERR(neigh_entry))
2061 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002062 }
2063
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002064 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2065 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2066 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2067
2068 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2069 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2070
2071out:
2072 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002073 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002074 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002075}
2076
Ido Schimmel28678f02017-11-02 17:14:10 +01002077static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2078
2079static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2080{
2081 struct mlxsw_sp_netevent_work *net_work =
2082 container_of(work, struct mlxsw_sp_netevent_work, work);
2083 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2084
2085 mlxsw_sp_mp_hash_init(mlxsw_sp);
2086 kfree(net_work);
2087}
2088
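/* Netevent notifier: DELAY_PROBE_TIME updates adjust the neighbour polling
 * interval, NEIGH_UPDATE schedules a work item that reflects the
 * neighbour's new state in the device, and MULTIPATH_HASH_UPDATE schedules
 * a re-initialization of the multipath hash configuration. The notifier
 * runs in atomic context, hence the GFP_ATOMIC allocations and the
 * deferral to process context.
 */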
2089static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002090 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002091{
Ido Schimmelceb88812017-11-02 17:14:07 +01002092 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002093 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002094 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002095 struct mlxsw_sp *mlxsw_sp;
2096 unsigned long interval;
2097 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002098 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002099 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002100
2101 switch (event) {
2102 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2103 p = ptr;
2104
2105 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002106 if (!p->dev || (p->tbl->family != AF_INET &&
2107 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002108 return NOTIFY_DONE;
2109
2110 /* We are in atomic context and can't take RTNL mutex,
2111 * so use RCU variant to walk the device chain.
2112 */
2113 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2114 if (!mlxsw_sp_port)
2115 return NOTIFY_DONE;
2116
2117 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2118 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002119 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002120
2121 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2122 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002123 case NETEVENT_NEIGH_UPDATE:
2124 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002125
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002126 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002127 return NOTIFY_DONE;
2128
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002129 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002130 if (!mlxsw_sp_port)
2131 return NOTIFY_DONE;
2132
Ido Schimmelceb88812017-11-02 17:14:07 +01002133 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2134 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002135 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002136 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002137 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002138
Ido Schimmelceb88812017-11-02 17:14:07 +01002139 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2140 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2141 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002142
2143 /* Take a reference to ensure the neighbour won't be
2144 * destructed until we drop the reference in delayed
2145 * work.
2146 */
2147 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002148 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002149 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002150 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002151 case NETEVENT_MULTIPATH_HASH_UPDATE:
2152 net = ptr;
2153
2154 if (!net_eq(net, &init_net))
2155 return NOTIFY_DONE;
2156
2157 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2158 if (!net_work)
2159 return NOTIFY_BAD;
2160
2161 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2162 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2163 net_work->mlxsw_sp = router->mlxsw_sp;
2164 mlxsw_core_schedule_work(&net_work->work);
2165 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002166 }
2167
2168 return NOTIFY_DONE;
2169}
2170
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002171static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2172{
Yotam Gigic723c7352016-07-05 11:27:43 +02002173 int err;
2174
Ido Schimmel9011b672017-05-16 19:38:25 +02002175 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002176 &mlxsw_sp_neigh_ht_params);
2177 if (err)
2178 return err;
2179
2180 /* Initialize the polling interval according to the default
2181 * table.
2182 */
2183 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2184
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002185	/* Create the delayed works for activity updates and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002186 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002187 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002188 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002189 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002190 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2191 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002192 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002193}
2194
2195static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2196{
Ido Schimmel9011b672017-05-16 19:38:25 +02002197 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2198 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2199 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002200}
2201
Ido Schimmel9665b742017-02-08 11:16:42 +01002202static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002203 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002204{
2205 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2206
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002207 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002208 rif_list_node) {
2209 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002210 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002211 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002212}
2213
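/* A nexthop either points at an Ethernet neighbour or at an IP-in-IP
 * tunnel entry; the type selects which member of the union in
 * struct mlxsw_sp_nexthop is valid and which helper programs the
 * adjacency entries.
 */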
Petr Machata35225e42017-09-02 23:49:22 +02002214enum mlxsw_sp_nexthop_type {
2215 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002216 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002217};
2218
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002219struct mlxsw_sp_nexthop_key {
2220 struct fib_nh *fib_nh;
2221};
2222
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002223struct mlxsw_sp_nexthop {
2224 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002225 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002226 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002227 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2228 * this belongs to
2229 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002230 struct rhash_head ht_node;
2231 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002232 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002233 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002234 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002235 int norm_nh_weight;
2236 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002237 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002238 u8 should_offload:1, /* set indicates this neigh is connected and
2239				    * should be put into the KVD linear area of this group.
2240 */
2241 offloaded:1, /* set in case the neigh is actually put into
2242 * KVD linear area of this group.
2243 */
2244	   update:1; /* set indicates that the MAC of this neigh should be
2245 * updated in HW
2246 */
Petr Machata35225e42017-09-02 23:49:22 +02002247 enum mlxsw_sp_nexthop_type type;
2248 union {
2249 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002250 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002251 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002252 unsigned int counter_index;
2253 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002254};
2255
2256struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002257 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002258 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002259 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002260 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002261 u8 adj_index_valid:1,
2262 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002263 u32 adj_index;
2264 u16 ecmp_size;
2265 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002266 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002267 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002268#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002269};
2270
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002271void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2272 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002273{
2274 struct devlink *devlink;
2275
2276 devlink = priv_to_devlink(mlxsw_sp->core);
2277 if (!devlink_dpipe_table_counter_enabled(devlink,
2278 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2279 return;
2280
2281 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2282 return;
2283
2284 nh->counter_valid = true;
2285}
2286
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002287void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2288 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002289{
2290 if (!nh->counter_valid)
2291 return;
2292 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2293 nh->counter_valid = false;
2294}
2295
2296int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2297 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2298{
2299 if (!nh->counter_valid)
2300 return -EINVAL;
2301
2302 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2303 p_counter, NULL);
2304}
2305
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002306struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2307 struct mlxsw_sp_nexthop *nh)
2308{
2309 if (!nh) {
2310 if (list_empty(&router->nexthop_list))
2311 return NULL;
2312 else
2313 return list_first_entry(&router->nexthop_list,
2314 typeof(*nh), router_list_node);
2315 }
2316 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2317 return NULL;
2318 return list_next_entry(nh, router_list_node);
2319}
2320
2321bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2322{
2323 return nh->offloaded;
2324}
2325
2326unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2327{
2328 if (!nh->offloaded)
2329 return NULL;
2330 return nh->neigh_entry->ha;
2331}
2332
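/* Return the location of a nexthop within its group's adjacency block: the
 * group's base adjacency index, the group (ECMP) size and the offset of
 * this nexthop's entries, computed by summing the adjacency entries of the
 * offloaded nexthops that precede it.
 */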
2333int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002334 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002335{
2336 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2337 u32 adj_hash_index = 0;
2338 int i;
2339
2340 if (!nh->offloaded || !nh_grp->adj_index_valid)
2341 return -EINVAL;
2342
2343 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002344 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002345
2346 for (i = 0; i < nh_grp->count; i++) {
2347 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2348
2349 if (nh_iter == nh)
2350 break;
2351 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002352 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002353 }
2354
2355 *p_adj_hash_index = adj_hash_index;
2356 return 0;
2357}
2358
2359struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2360{
2361 return nh->rif;
2362}
2363
2364bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2365{
2366 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2367 int i;
2368
2369 for (i = 0; i < nh_grp->count; i++) {
2370 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2371
2372 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2373 return true;
2374 }
2375 return false;
2376}
2377
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002378static struct fib_info *
2379mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2380{
2381 return nh_grp->priv;
2382}
2383
2384struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002385 enum mlxsw_sp_l3proto proto;
2386 union {
2387 struct fib_info *fi;
2388 struct mlxsw_sp_fib6_entry *fib6_entry;
2389 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002390};
2391
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002392static bool
2393mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2394 const struct in6_addr *gw, int ifindex)
2395{
2396 int i;
2397
2398 for (i = 0; i < nh_grp->count; i++) {
2399 const struct mlxsw_sp_nexthop *nh;
2400
2401 nh = &nh_grp->nexthops[i];
2402 if (nh->ifindex == ifindex &&
2403 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2404 return true;
2405 }
2406
2407 return false;
2408}
2409
2410static bool
2411mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2412 const struct mlxsw_sp_fib6_entry *fib6_entry)
2413{
2414 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2415
2416 if (nh_grp->count != fib6_entry->nrt6)
2417 return false;
2418
2419 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2420 struct in6_addr *gw;
2421 int ifindex;
2422
2423 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2424 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2425 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2426 return false;
2427 }
2428
2429 return true;
2430}
2431
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002432static int
2433mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2434{
2435 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2436 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2437
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002438 switch (cmp_arg->proto) {
2439 case MLXSW_SP_L3_PROTO_IPV4:
2440 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2441 case MLXSW_SP_L3_PROTO_IPV6:
2442 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2443 cmp_arg->fib6_entry);
2444 default:
2445 WARN_ON(1);
2446 return 1;
2447 }
2448}
2449
2450static int
2451mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2452{
2453 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002454}
2455
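/* Nexthop groups are keyed differently per protocol: IPv4 groups hash and
 * compare on the fib_info they were created from, whereas IPv6 groups -
 * which have no single fib_info to key on - hash on the nexthop count and
 * the XOR of the nexthop ifindexes, and compare gateway/ifindex pairs
 * against the FIB6 entry.
 */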
2456static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2457{
2458 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002459 const struct mlxsw_sp_nexthop *nh;
2460 struct fib_info *fi;
2461 unsigned int val;
2462 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002463
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002464 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2465 case AF_INET:
2466 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2467 return jhash(&fi, sizeof(fi), seed);
2468 case AF_INET6:
2469 val = nh_grp->count;
2470 for (i = 0; i < nh_grp->count; i++) {
2471 nh = &nh_grp->nexthops[i];
2472 val ^= nh->ifindex;
2473 }
2474 return jhash(&val, sizeof(val), seed);
2475 default:
2476 WARN_ON(1);
2477 return 0;
2478 }
2479}
2480
2481static u32
2482mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2483{
2484 unsigned int val = fib6_entry->nrt6;
2485 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2486 struct net_device *dev;
2487
2488 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2489 dev = mlxsw_sp_rt6->rt->dst.dev;
2490 val ^= dev->ifindex;
2491 }
2492
2493 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002494}
2495
2496static u32
2497mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2498{
2499 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2500
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002501 switch (cmp_arg->proto) {
2502 case MLXSW_SP_L3_PROTO_IPV4:
2503 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2504 case MLXSW_SP_L3_PROTO_IPV6:
2505 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2506 default:
2507 WARN_ON(1);
2508 return 0;
2509 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002510}
2511
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002512static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002513 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002514 .hashfn = mlxsw_sp_nexthop_group_hash,
2515 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2516 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002517};
2518
2519static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2520 struct mlxsw_sp_nexthop_group *nh_grp)
2521{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002522 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2523 !nh_grp->gateway)
2524 return 0;
2525
Ido Schimmel9011b672017-05-16 19:38:25 +02002526 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002527 &nh_grp->ht_node,
2528 mlxsw_sp_nexthop_group_ht_params);
2529}
2530
2531static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2532 struct mlxsw_sp_nexthop_group *nh_grp)
2533{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002534 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2535 !nh_grp->gateway)
2536 return;
2537
Ido Schimmel9011b672017-05-16 19:38:25 +02002538 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002539 &nh_grp->ht_node,
2540 mlxsw_sp_nexthop_group_ht_params);
2541}
2542
2543static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002544mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2545 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002546{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002547 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2548
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002549 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002550 cmp_arg.fi = fi;
2551 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2552 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002553 mlxsw_sp_nexthop_group_ht_params);
2554}
2555
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002556static struct mlxsw_sp_nexthop_group *
2557mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2558 struct mlxsw_sp_fib6_entry *fib6_entry)
2559{
2560 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2561
2562 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2563 cmp_arg.fib6_entry = fib6_entry;
2564 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2565 &cmp_arg,
2566 mlxsw_sp_nexthop_group_ht_params);
2567}
2568
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002569static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2570 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2571 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2572 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2573};
2574
2575static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2576 struct mlxsw_sp_nexthop *nh)
2577{
Ido Schimmel9011b672017-05-16 19:38:25 +02002578 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002579 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2580}
2581
2582static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2583 struct mlxsw_sp_nexthop *nh)
2584{
Ido Schimmel9011b672017-05-16 19:38:25 +02002585 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002586 mlxsw_sp_nexthop_ht_params);
2587}
2588
Ido Schimmelad178c82017-02-08 11:16:40 +01002589static struct mlxsw_sp_nexthop *
2590mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2591 struct mlxsw_sp_nexthop_key key)
2592{
Ido Schimmel9011b672017-05-16 19:38:25 +02002593 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002594 mlxsw_sp_nexthop_ht_params);
2595}
2596
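/* When a nexthop group is moved to a new adjacency index or resized, every
 * route that uses it must be repointed. The RALEU register written below
 * performs that update in bulk for all routes of a given virtual router,
 * so the FIB entries do not have to be rewritten one by one.
 */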
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002597static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002598 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002599 u32 adj_index, u16 ecmp_size,
2600 u32 new_adj_index,
2601 u16 new_ecmp_size)
2602{
2603 char raleu_pl[MLXSW_REG_RALEU_LEN];
2604
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002605 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002606 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2607 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002608 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002609 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2610}
2611
2612static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2613 struct mlxsw_sp_nexthop_group *nh_grp,
2614 u32 old_adj_index, u16 old_ecmp_size)
2615{
2616 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002617 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002618 int err;
2619
2620 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002621 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002622 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002623 fib = fib_entry->fib_node->fib;
2624 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002625 old_adj_index,
2626 old_ecmp_size,
2627 nh_grp->adj_index,
2628 nh_grp->ecmp_size);
2629 if (err)
2630 return err;
2631 }
2632 return 0;
2633}
2634
Ido Schimmeleb789982017-10-22 23:11:48 +02002635static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2636 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002637{
2638 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2639 char ratr_pl[MLXSW_REG_RATR_LEN];
2640
2641 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002642 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2643 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002644 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002645 if (nh->counter_valid)
2646 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2647 else
2648 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2649
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002650 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2651}
2652
Ido Schimmeleb789982017-10-22 23:11:48 +02002653int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2654 struct mlxsw_sp_nexthop *nh)
2655{
2656 int i;
2657
2658 for (i = 0; i < nh->num_adj_entries; i++) {
2659 int err;
2660
2661 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2662 if (err)
2663 return err;
2664 }
2665
2666 return 0;
2667}
2668
2669static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2670 u32 adj_index,
2671 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002672{
2673 const struct mlxsw_sp_ipip_ops *ipip_ops;
2674
2675 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2676 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2677}
2678
Ido Schimmeleb789982017-10-22 23:11:48 +02002679static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2680 u32 adj_index,
2681 struct mlxsw_sp_nexthop *nh)
2682{
2683 int i;
2684
2685 for (i = 0; i < nh->num_adj_entries; i++) {
2686 int err;
2687
2688 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2689 nh);
2690 if (err)
2691 return err;
2692 }
2693
2694 return 0;
2695}
2696
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002697static int
Petr Machata35225e42017-09-02 23:49:22 +02002698mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2699 struct mlxsw_sp_nexthop_group *nh_grp,
2700 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002701{
2702 u32 adj_index = nh_grp->adj_index; /* base */
2703 struct mlxsw_sp_nexthop *nh;
2704 int i;
2705 int err;
2706
2707 for (i = 0; i < nh_grp->count; i++) {
2708 nh = &nh_grp->nexthops[i];
2709
2710 if (!nh->should_offload) {
2711 nh->offloaded = 0;
2712 continue;
2713 }
2714
Ido Schimmela59b7e02017-01-23 11:11:42 +01002715 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002716 switch (nh->type) {
2717 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002718 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002719 (mlxsw_sp, adj_index, nh);
2720 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002721 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2722 err = mlxsw_sp_nexthop_ipip_update
2723 (mlxsw_sp, adj_index, nh);
2724 break;
Petr Machata35225e42017-09-02 23:49:22 +02002725 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002726 if (err)
2727 return err;
2728 nh->update = 0;
2729 nh->offloaded = 1;
2730 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002731 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002732 }
2733 return 0;
2734}
2735
Ido Schimmel1819ae32017-07-21 18:04:28 +02002736static bool
2737mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2738 const struct mlxsw_sp_fib_entry *fib_entry);
2739
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002740static int
2741mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2742 struct mlxsw_sp_nexthop_group *nh_grp)
2743{
2744 struct mlxsw_sp_fib_entry *fib_entry;
2745 int err;
2746
2747 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002748 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2749 fib_entry))
2750 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002751 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2752 if (err)
2753 return err;
2754 }
2755 return 0;
2756}
2757
2758static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002759mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2760 enum mlxsw_reg_ralue_op op, int err);
2761
2762static void
2763mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2764{
2765 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2766 struct mlxsw_sp_fib_entry *fib_entry;
2767
2768 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2769 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2770 fib_entry))
2771 continue;
2772 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2773 }
2774}
2775
Ido Schimmel425a08c2017-10-22 23:11:47 +02002776static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2777{
2778 /* Valid sizes for an adjacency group are:
2779 * 1-64, 512, 1024, 2048 and 4096.
2780 */
2781 if (*p_adj_grp_size <= 64)
2782 return;
2783 else if (*p_adj_grp_size <= 512)
2784 *p_adj_grp_size = 512;
2785 else if (*p_adj_grp_size <= 1024)
2786 *p_adj_grp_size = 1024;
2787 else if (*p_adj_grp_size <= 2048)
2788 *p_adj_grp_size = 2048;
2789 else
2790 *p_adj_grp_size = 4096;
2791}
2792
2793static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2794 unsigned int alloc_size)
2795{
2796 if (alloc_size >= 4096)
2797 *p_adj_grp_size = 4096;
2798 else if (alloc_size >= 2048)
2799 *p_adj_grp_size = 2048;
2800 else if (alloc_size >= 1024)
2801 *p_adj_grp_size = 1024;
2802 else if (alloc_size >= 512)
2803 *p_adj_grp_size = 512;
2804}
2805
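/* Reconcile a requested ECMP size with what the device and the KVD linear
 * allocator can provide: round the request up to the next supported group
 * size, query how large an allocation of that size would actually be, and
 * round back down to the largest supported size that fits. E.g. a request
 * for 100 is rounded up to 512, while a request for 37 is already valid
 * and stays 37.
 */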
2806static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2807 u16 *p_adj_grp_size)
2808{
2809 unsigned int alloc_size;
2810 int err;
2811
2812 /* Round up the requested group size to the next size supported
2813 * by the device and make sure the request can be satisfied.
2814 */
2815 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2816 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2817 &alloc_size);
2818 if (err)
2819 return err;
2820 /* It is possible the allocation results in more allocated
2821 * entries than requested. Try to use as many of them as
2822 * possible.
2823 */
2824 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2825
2826 return 0;
2827}
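/* Illustrative example for the helpers above (values are hypothetical): a
 * requested group size of 300 is first rounded up to 512, the next size the
 * device supports; requests of 64 entries or less are not rounded up. If the
 * KVDL allocation query then reports that, say, 1024 entries would actually
 * be allocated for that request, the group size is bumped to 1024 so the
 * surplus entries are not wasted.
 */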
2828
Ido Schimmel77d964e2017-08-02 09:56:05 +02002829static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002830mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2831{
2832 int i, g = 0, sum_norm_weight = 0;
2833 struct mlxsw_sp_nexthop *nh;
2834
2835 for (i = 0; i < nh_grp->count; i++) {
2836 nh = &nh_grp->nexthops[i];
2837
2838 if (!nh->should_offload)
2839 continue;
2840 if (g > 0)
2841 g = gcd(nh->nh_weight, g);
2842 else
2843 g = nh->nh_weight;
2844 }
2845
2846 for (i = 0; i < nh_grp->count; i++) {
2847 nh = &nh_grp->nexthops[i];
2848
2849 if (!nh->should_offload)
2850 continue;
2851 nh->norm_nh_weight = nh->nh_weight / g;
2852 sum_norm_weight += nh->norm_nh_weight;
2853 }
2854
2855 nh_grp->sum_norm_weight = sum_norm_weight;
2856}
2857
2858static void
2859mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2860{
2861 int total = nh_grp->sum_norm_weight;
2862 u16 ecmp_size = nh_grp->ecmp_size;
2863 int i, weight = 0, lower_bound = 0;
2864
2865 for (i = 0; i < nh_grp->count; i++) {
2866 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2867 int upper_bound;
2868
2869 if (!nh->should_offload)
2870 continue;
2871 weight += nh->norm_nh_weight;
2872 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2873 nh->num_adj_entries = upper_bound - lower_bound;
2874 lower_bound = upper_bound;
2875 }
2876}
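/* Illustrative example for the two helpers above: with three offloadable
 * nexthops of weights 2, 4 and 6, the GCD is 2, so the normalized weights
 * become 1, 2 and 3 and sum_norm_weight is 6. With an ECMP group size of 6,
 * the rebalance assigns 1, 2 and 3 adjacency entries respectively (upper
 * bounds 1, 3 and 6 via DIV_ROUND_CLOSEST), preserving the 1:2:3 ratio.
 * Nexthops that cannot be offloaded are skipped in both passes.
 */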
2877
2878static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002879mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2880 struct mlxsw_sp_nexthop_group *nh_grp)
2881{
Ido Schimmeleb789982017-10-22 23:11:48 +02002882 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002883 struct mlxsw_sp_nexthop *nh;
2884 bool offload_change = false;
2885 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002886 bool old_adj_index_valid;
2887 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002888 int i;
2889 int err;
2890
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002891 if (!nh_grp->gateway) {
2892 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2893 return;
2894 }
2895
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002896 for (i = 0; i < nh_grp->count; i++) {
2897 nh = &nh_grp->nexthops[i];
2898
Petr Machata56b8a9e2017-07-31 09:27:29 +02002899 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002900 offload_change = true;
2901 if (nh->should_offload)
2902 nh->update = 1;
2903 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002904 }
2905 if (!offload_change) {
2906 /* Nothing was added or removed, so no need to reallocate. Just
2907 * update MAC on existing adjacency indexes.
2908 */
Petr Machata35225e42017-09-02 23:49:22 +02002909 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002910 if (err) {
2911 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2912 goto set_trap;
2913 }
2914 return;
2915 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002916 mlxsw_sp_nexthop_group_normalize(nh_grp);
2917 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002918 /* No neigh of this group is connected so we just set
2919 * the trap and let everything flow through the kernel.
2920 */
2921 goto set_trap;
2922
Ido Schimmeleb789982017-10-22 23:11:48 +02002923 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002924 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2925 if (err)
2926 /* No valid allocation size available. */
2927 goto set_trap;
2928
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002929 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2930 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002931 /* We ran out of KVD linear space, just set the
2932 * trap and let everything flow through the kernel.
2933 */
2934 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2935 goto set_trap;
2936 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002937 old_adj_index_valid = nh_grp->adj_index_valid;
2938 old_adj_index = nh_grp->adj_index;
2939 old_ecmp_size = nh_grp->ecmp_size;
2940 nh_grp->adj_index_valid = 1;
2941 nh_grp->adj_index = adj_index;
2942 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002943 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002944 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002945 if (err) {
2946 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2947 goto set_trap;
2948 }
2949
2950 if (!old_adj_index_valid) {
2951 /* The trap was set for fib entries, so we have to call
2952 * fib entry update to unset it and use adjacency index.
2953 */
2954 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2955 if (err) {
2956 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2957 goto set_trap;
2958 }
2959 return;
2960 }
2961
2962 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2963 old_adj_index, old_ecmp_size);
2964 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2965 if (err) {
2966 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2967 goto set_trap;
2968 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002969
2970 /* Offload state within the group changed, so update the flags. */
2971 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2972
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002973 return;
2974
2975set_trap:
2976 old_adj_index_valid = nh_grp->adj_index_valid;
2977 nh_grp->adj_index_valid = 0;
2978 for (i = 0; i < nh_grp->count; i++) {
2979 nh = &nh_grp->nexthops[i];
2980 nh->offloaded = 0;
2981 }
2982 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2983 if (err)
2984 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2985 if (old_adj_index_valid)
2986 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2987}
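/* In summary, mlxsw_sp_nexthop_group_refresh() above re-syncs a nexthop group
 * with the device: groups without gateway nexthops only have their FIB
 * entries rewritten; otherwise, if no nexthop was added or removed, the
 * existing adjacency entries are rewritten in place. When membership changes,
 * a new adjacency group sized by the normalized weights is allocated from KVD
 * linear memory, the nexthops are written into it, FIB entries are switched
 * over (or mass-updated from the old adjacency index) and the old group is
 * freed. Any failure falls back to trapping packets to the CPU via set_trap.
 */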
2988
2989static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2990 bool removing)
2991{
Petr Machata213666a2017-07-31 09:27:30 +02002992 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002993 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002994 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002995 nh->should_offload = 0;
2996 nh->update = 1;
2997}
2998
2999static void
3000mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3001 struct mlxsw_sp_neigh_entry *neigh_entry,
3002 bool removing)
3003{
3004 struct mlxsw_sp_nexthop *nh;
3005
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003006 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3007 neigh_list_node) {
3008 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3009 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3010 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003011}
3012
Ido Schimmel9665b742017-02-08 11:16:42 +01003013static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003014 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003015{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003016 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003017 return;
3018
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003019 nh->rif = rif;
3020 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003021}
3022
3023static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3024{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003025 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003026 return;
3027
3028 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003029 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003030}
3031
Ido Schimmela8c97012017-02-08 11:16:35 +01003032static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3033 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003034{
3035 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003036 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003037 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003038 int err;
3039
Ido Schimmelad178c82017-02-08 11:16:40 +01003040 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003041 return 0;
3042
Jiri Pirko33b13412016-11-10 12:31:04 +01003043 /* Take a reference on the neigh here to ensure that it is not
Petr Machata8de3c172017-07-31 09:27:25 +02003044 * destroyed before the nexthop entry is finished with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003045 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003046 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003047 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003048 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003049 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003050 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3051 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003052 if (IS_ERR(n))
3053 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003054 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003055 }
3056 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3057 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003058 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3059 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003060 err = -EINVAL;
3061 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003062 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003063 }
Yotam Gigib2157142016-07-05 11:27:51 +02003064
3065 /* If that is the first nexthop connected to that neigh, add to
3066 * nexthop_neighs_list
3067 */
3068 if (list_empty(&neigh_entry->nexthop_list))
3069 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003070 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003071
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003072 nh->neigh_entry = neigh_entry;
3073 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3074 read_lock_bh(&n->lock);
3075 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003076 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003077 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003078 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003079
3080 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003081
3082err_neigh_entry_create:
3083 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003084 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003085}
3086
Ido Schimmela8c97012017-02-08 11:16:35 +01003087static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3088 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003089{
3090 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003091 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003092
Ido Schimmelb8399a12017-02-08 11:16:33 +01003093 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003094 return;
3095 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003096
Ido Schimmel58312122016-12-23 09:32:50 +01003097 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003098 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003099 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003100
3101 /* If that is the last nexthop connected to that neigh, remove from
3102 * nexthop_neighs_list
3103 */
Ido Schimmele58be792017-02-08 11:16:28 +01003104 if (list_empty(&neigh_entry->nexthop_list))
3105 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003106
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003107 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3108 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3109
3110 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003111}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003112
Petr Machata1012b9a2017-09-02 23:49:23 +02003113static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003114 struct mlxsw_sp_nexthop *nh,
3115 struct net_device *ol_dev)
3116{
3117 if (!nh->nh_grp->gateway || nh->ipip_entry)
3118 return 0;
3119
Petr Machata4cccb732017-10-16 16:26:39 +02003120 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3121 if (!nh->ipip_entry)
3122 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003123
3124 __mlxsw_sp_nexthop_neigh_update(nh, false);
3125 return 0;
3126}
3127
3128static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3129 struct mlxsw_sp_nexthop *nh)
3130{
3131 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3132
3133 if (!ipip_entry)
3134 return;
3135
3136 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003137 nh->ipip_entry = NULL;
3138}
3139
3140static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3141 const struct fib_nh *fib_nh,
3142 enum mlxsw_sp_ipip_type *p_ipipt)
3143{
3144 struct net_device *dev = fib_nh->nh_dev;
3145
3146 return dev &&
3147 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3148 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3149}
3150
Petr Machata35225e42017-09-02 23:49:22 +02003151static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3152 struct mlxsw_sp_nexthop *nh)
3153{
3154 switch (nh->type) {
3155 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3156 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3157 mlxsw_sp_nexthop_rif_fini(nh);
3158 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003159 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003160 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003161 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3162 break;
Petr Machata35225e42017-09-02 23:49:22 +02003163 }
3164}
3165
3166static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3167 struct mlxsw_sp_nexthop *nh,
3168 struct fib_nh *fib_nh)
3169{
Petr Machata1012b9a2017-09-02 23:49:23 +02003170 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003171 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003172 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003173 struct mlxsw_sp_rif *rif;
3174 int err;
3175
Petr Machata1012b9a2017-09-02 23:49:23 +02003176 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3177 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3178 MLXSW_SP_L3_PROTO_IPV4)) {
3179 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003180 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003181 if (err)
3182 return err;
3183 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3184 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003185 }
3186
Petr Machata35225e42017-09-02 23:49:22 +02003187 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3188 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3189 if (!rif)
3190 return 0;
3191
3192 mlxsw_sp_nexthop_rif_init(nh, rif);
3193 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3194 if (err)
3195 goto err_neigh_init;
3196
3197 return 0;
3198
3199err_neigh_init:
3200 mlxsw_sp_nexthop_rif_fini(nh);
3201 return err;
3202}
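/* The helper above resolves the nexthop type: if the egress device is an
 * IP-in-IP tunnel that the driver can offload, the nexthop is bound to the
 * tunnel's loopback RIF and marked as an IPIP nexthop; otherwise it is a
 * regular Ethernet nexthop bound to the RIF of the egress netdev, with the
 * neighbour entry resolved via mlxsw_sp_nexthop_neigh_init().
 */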
3203
3204static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3205 struct mlxsw_sp_nexthop *nh)
3206{
3207 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3208}
3209
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003210static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3211 struct mlxsw_sp_nexthop_group *nh_grp,
3212 struct mlxsw_sp_nexthop *nh,
3213 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003214{
3215 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003216 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003217 int err;
3218
3219 nh->nh_grp = nh_grp;
3220 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003221#ifdef CONFIG_IP_ROUTE_MULTIPATH
3222 nh->nh_weight = fib_nh->nh_weight;
3223#else
3224 nh->nh_weight = 1;
3225#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003226 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003227 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3228 if (err)
3229 return err;
3230
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003231 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003232 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3233
Ido Schimmel97989ee2017-03-10 08:53:38 +01003234 if (!dev)
3235 return 0;
3236
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003237 in_dev = __in_dev_get_rtnl(dev);
3238 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3239 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3240 return 0;
3241
Petr Machata35225e42017-09-02 23:49:22 +02003242 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003243 if (err)
3244 goto err_nexthop_neigh_init;
3245
3246 return 0;
3247
3248err_nexthop_neigh_init:
3249 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3250 return err;
3251}
3252
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003253static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3254 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003255{
Petr Machata35225e42017-09-02 23:49:22 +02003256 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003257 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003258 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003259 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003260}
3261
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003262static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3263 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003264{
3265 struct mlxsw_sp_nexthop_key key;
3266 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003267
Ido Schimmel9011b672017-05-16 19:38:25 +02003268 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003269 return;
3270
3271 key.fib_nh = fib_nh;
3272 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3273 if (WARN_ON_ONCE(!nh))
3274 return;
3275
Ido Schimmelad178c82017-02-08 11:16:40 +01003276 switch (event) {
3277 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003278 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003279 break;
3280 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003281 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003282 break;
3283 }
3284
3285 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3286}
3287
Ido Schimmel9665b742017-02-08 11:16:42 +01003288static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003289 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003290{
3291 struct mlxsw_sp_nexthop *nh, *tmp;
3292
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003293 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003294 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003295 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3296 }
3297}
3298
Petr Machata9b014512017-09-02 23:49:20 +02003299static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3300 const struct fib_info *fi)
3301{
Petr Machata1012b9a2017-09-02 23:49:23 +02003302 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3303 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003304}
3305
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003306static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003307mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003308{
3309 struct mlxsw_sp_nexthop_group *nh_grp;
3310 struct mlxsw_sp_nexthop *nh;
3311 struct fib_nh *fib_nh;
3312 size_t alloc_size;
3313 int i;
3314 int err;
3315
3316 alloc_size = sizeof(*nh_grp) +
3317 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3318 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3319 if (!nh_grp)
3320 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003321 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003322 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003323 nh_grp->neigh_tbl = &arp_tbl;
3324
Petr Machata9b014512017-09-02 23:49:20 +02003325 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003326 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003327 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003328 for (i = 0; i < nh_grp->count; i++) {
3329 nh = &nh_grp->nexthops[i];
3330 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003331 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003332 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003333 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003334 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003335 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3336 if (err)
3337 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003338 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3339 return nh_grp;
3340
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003341err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003342err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003343 for (i--; i >= 0; i--) {
3344 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003345 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003346 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003347 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003348 kfree(nh_grp);
3349 return ERR_PTR(err);
3350}
3351
3352static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003353mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3354 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003355{
3356 struct mlxsw_sp_nexthop *nh;
3357 int i;
3358
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003359 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003360 for (i = 0; i < nh_grp->count; i++) {
3361 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003362 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003363 }
Ido Schimmel58312122016-12-23 09:32:50 +01003364 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3365 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003366 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003367 kfree(nh_grp);
3368}
3369
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003370static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3371 struct mlxsw_sp_fib_entry *fib_entry,
3372 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003373{
3374 struct mlxsw_sp_nexthop_group *nh_grp;
3375
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003376 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003377 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003378 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003379 if (IS_ERR(nh_grp))
3380 return PTR_ERR(nh_grp);
3381 }
3382 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3383 fib_entry->nh_group = nh_grp;
3384 return 0;
3385}
3386
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003387static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3388 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003389{
3390 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3391
3392 list_del(&fib_entry->nexthop_group_node);
3393 if (!list_empty(&nh_grp->fib_list))
3394 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003395 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003396}
3397
Ido Schimmel013b20f2017-02-08 11:16:36 +01003398static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003399mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3400{
3401 struct mlxsw_sp_fib4_entry *fib4_entry;
3402
3403 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3404 common);
3405 return !fib4_entry->tos;
3406}
3407
3408static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003409mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3410{
3411 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3412
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003413 switch (fib_entry->fib_node->fib->proto) {
3414 case MLXSW_SP_L3_PROTO_IPV4:
3415 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3416 return false;
3417 break;
3418 case MLXSW_SP_L3_PROTO_IPV6:
3419 break;
3420 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003421
Ido Schimmel013b20f2017-02-08 11:16:36 +01003422 switch (fib_entry->type) {
3423 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3424 return !!nh_group->adj_index_valid;
3425 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003426 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003427 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3428 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003429 default:
3430 return false;
3431 }
3432}
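/* Offload eligibility as implemented above: IPv4 entries with a non-zero TOS
 * are never offloaded; remote (gateway) entries require a valid adjacency
 * group; local entries require a RIF; IP-in-IP decap entries are always
 * considered offloadable.
 */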
3433
Ido Schimmel428b8512017-08-03 13:28:28 +02003434static struct mlxsw_sp_nexthop *
3435mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3436 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3437{
3438 int i;
3439
3440 for (i = 0; i < nh_grp->count; i++) {
3441 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3442 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3443
3444 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3445 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3446 &rt->rt6i_gateway))
3447 return nh;
3448 continue;
3449 }
3450
3451 return NULL;
3452}
3453
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003454static void
3455mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3456{
3457 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3458 int i;
3459
Petr Machata4607f6d2017-09-02 23:49:25 +02003460 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3461 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003462 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3463 return;
3464 }
3465
3466 for (i = 0; i < nh_grp->count; i++) {
3467 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3468
3469 if (nh->offloaded)
3470 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3471 else
3472 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3473 }
3474}
3475
3476static void
3477mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3478{
3479 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3480 int i;
3481
3482 for (i = 0; i < nh_grp->count; i++) {
3483 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3484
3485 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3486 }
3487}
3488
Ido Schimmel428b8512017-08-03 13:28:28 +02003489static void
3490mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3491{
3492 struct mlxsw_sp_fib6_entry *fib6_entry;
3493 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3494
3495 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3496 common);
3497
3498 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3499 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003500 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003501 return;
3502 }
3503
3504 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3505 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3506 struct mlxsw_sp_nexthop *nh;
3507
3508 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3509 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003510 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003511 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003512 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003513 }
3514}
3515
3516static void
3517mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3518{
3519 struct mlxsw_sp_fib6_entry *fib6_entry;
3520 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3521
3522 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3523 common);
3524 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3525 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3526
Ido Schimmelfe400792017-08-15 09:09:49 +02003527 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003528 }
3529}
3530
Ido Schimmel013b20f2017-02-08 11:16:36 +01003531static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3532{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003533 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003534 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003535 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003536 break;
3537 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003538 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3539 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003540 }
3541}
3542
3543static void
3544mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3545{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003546 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003547 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003548 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003549 break;
3550 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003551 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3552 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003553 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003554}
3555
3556static void
3557mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3558 enum mlxsw_reg_ralue_op op, int err)
3559{
3560 switch (op) {
3561 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003562 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3563 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3564 if (err)
3565 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003566 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003567 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003568 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003569 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3570 return;
3571 default:
3572 return;
3573 }
3574}
3575
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003576static void
3577mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3578 const struct mlxsw_sp_fib_entry *fib_entry,
3579 enum mlxsw_reg_ralue_op op)
3580{
3581 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3582 enum mlxsw_reg_ralxx_protocol proto;
3583 u32 *p_dip;
3584
3585 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3586
3587 switch (fib->proto) {
3588 case MLXSW_SP_L3_PROTO_IPV4:
3589 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3590 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3591 fib_entry->fib_node->key.prefix_len,
3592 *p_dip);
3593 break;
3594 case MLXSW_SP_L3_PROTO_IPV6:
3595 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3596 fib_entry->fib_node->key.prefix_len,
3597 fib_entry->fib_node->key.addr);
3598 break;
3599 }
3600}
3601
3602static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3603 struct mlxsw_sp_fib_entry *fib_entry,
3604 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003605{
3606 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003607 enum mlxsw_reg_ralue_trap_action trap_action;
3608 u16 trap_id = 0;
3609 u32 adjacency_index = 0;
3610 u16 ecmp_size = 0;
3611
3612 /* In case the nexthop group adjacency index is valid, use it
3613 * with the provided ECMP size. Otherwise, set up a trap and pass
3614 * traffic to the kernel.
3615 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003616 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003617 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3618 adjacency_index = fib_entry->nh_group->adj_index;
3619 ecmp_size = fib_entry->nh_group->ecmp_size;
3620 } else {
3621 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3622 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3623 }
3624
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003625 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003626 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3627 adjacency_index, ecmp_size);
3628 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3629}
3630
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003631static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3632 struct mlxsw_sp_fib_entry *fib_entry,
3633 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003634{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003635 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003636 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003637 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003638 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003639 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003640
3641 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3642 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003643 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003644 } else {
3645 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3646 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3647 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003648
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003649 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003650 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3651 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003652 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3653}
3654
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003655static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3656 struct mlxsw_sp_fib_entry *fib_entry,
3657 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003658{
3659 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003660
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003661 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003662 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3664}
3665
Petr Machata4607f6d2017-09-02 23:49:25 +02003666static int
3667mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3668 struct mlxsw_sp_fib_entry *fib_entry,
3669 enum mlxsw_reg_ralue_op op)
3670{
3671 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3672 const struct mlxsw_sp_ipip_ops *ipip_ops;
3673
3674 if (WARN_ON(!ipip_entry))
3675 return -EINVAL;
3676
3677 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3678 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3679 fib_entry->decap.tunnel_index);
3680}
3681
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003682static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3683 struct mlxsw_sp_fib_entry *fib_entry,
3684 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003685{
3686 switch (fib_entry->type) {
3687 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003688 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003689 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003690 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003691 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003692 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003693 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3694 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3695 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003696 }
3697 return -EINVAL;
3698}
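/* The dispatch above maps each entry type to a RALUE action: remote entries
 * point at the adjacency group (or trap to the CPU when the group cannot be
 * offloaded), local entries forward to the RIF (or trap), trap entries use
 * the ip2me action, and IP-in-IP decap entries are programmed through the
 * tunnel's ipip_ops with the decap tunnel index.
 */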
3699
3700static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3701 struct mlxsw_sp_fib_entry *fib_entry,
3702 enum mlxsw_reg_ralue_op op)
3703{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003704 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003705
Ido Schimmel013b20f2017-02-08 11:16:36 +01003706 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003707
Ido Schimmel013b20f2017-02-08 11:16:36 +01003708 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003709}
3710
3711static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3712 struct mlxsw_sp_fib_entry *fib_entry)
3713{
Jiri Pirko7146da32016-09-01 10:37:41 +02003714 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3715 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003716}
3717
3718static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3719 struct mlxsw_sp_fib_entry *fib_entry)
3720{
3721 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3722 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3723}
3724
Jiri Pirko61c503f2016-07-04 08:23:11 +02003725static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003726mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3727 const struct fib_entry_notifier_info *fen_info,
3728 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003729{
Petr Machata4607f6d2017-09-02 23:49:25 +02003730 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3731 struct net_device *dev = fen_info->fi->fib_dev;
3732 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003733 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003734
Ido Schimmel97989ee2017-03-10 08:53:38 +01003735 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003736 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003737 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3738 MLXSW_SP_L3_PROTO_IPV4, dip);
3739 if (ipip_entry) {
3740 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3741 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3742 fib_entry,
3743 ipip_entry);
3744 }
3745 /* fall through */
3746 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003747 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3748 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003749 case RTN_UNREACHABLE: /* fall through */
3750 case RTN_BLACKHOLE: /* fall through */
3751 case RTN_PROHIBIT:
3752 /* Packets hitting these routes need to be trapped, but
3753 * this can be done with a lower priority than packets directed
3754 * at the host, so use action type local instead of trap.
3755 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003756 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003757 return 0;
3758 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003759 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003760 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003761 else
3762 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003763 return 0;
3764 default:
3765 return -EINVAL;
3766 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003767}
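/* Route type to entry type mapping used above: an RTN_LOCAL route whose
 * destination matches the decap address of an offloaded IP-in-IP tunnel
 * becomes an IPIP_DECAP entry; other local and broadcast routes are trapped;
 * unreachable, blackhole and prohibit routes use the local action so they are
 * trapped at a lower priority; unicast routes become remote entries when they
 * are gateway (or offloadable tunnel) routes and local entries otherwise.
 */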
3768
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003769static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003770mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3771 struct mlxsw_sp_fib_node *fib_node,
3772 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003773{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003774 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003775 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003776 int err;
3777
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003778 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3779 if (!fib4_entry)
3780 return ERR_PTR(-ENOMEM);
3781 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003782
3783 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3784 if (err)
3785 goto err_fib4_entry_type_set;
3786
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003787 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003788 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003789 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003790
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003791 fib4_entry->prio = fen_info->fi->fib_priority;
3792 fib4_entry->tb_id = fen_info->tb_id;
3793 fib4_entry->type = fen_info->type;
3794 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003795
3796 fib_entry->fib_node = fib_node;
3797
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003798 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003799
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003800err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003801err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003802 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003803 return ERR_PTR(err);
3804}
3805
3806static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003807 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003808{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003809 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003810 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003811}
3812
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003813static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003814mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3815 const struct fib_entry_notifier_info *fen_info)
3816{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003817 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003818 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003819 struct mlxsw_sp_fib *fib;
3820 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003821
Ido Schimmel160e22a2017-07-18 10:10:20 +02003822 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3823 if (!vr)
3824 return NULL;
3825 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3826
3827 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3828 sizeof(fen_info->dst),
3829 fen_info->dst_len);
3830 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003831 return NULL;
3832
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003833 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3834 if (fib4_entry->tb_id == fen_info->tb_id &&
3835 fib4_entry->tos == fen_info->tos &&
3836 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003837 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3838 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003839 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003840 }
3841 }
3842
3843 return NULL;
3844}
3845
3846static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3847 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3848 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3849 .key_len = sizeof(struct mlxsw_sp_fib_key),
3850 .automatic_shrinking = true,
3851};
3852
3853static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3854 struct mlxsw_sp_fib_node *fib_node)
3855{
3856 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3857 mlxsw_sp_fib_ht_params);
3858}
3859
3860static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3861 struct mlxsw_sp_fib_node *fib_node)
3862{
3863 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3864 mlxsw_sp_fib_ht_params);
3865}
3866
3867static struct mlxsw_sp_fib_node *
3868mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3869 size_t addr_len, unsigned char prefix_len)
3870{
3871 struct mlxsw_sp_fib_key key;
3872
3873 memset(&key, 0, sizeof(key));
3874 memcpy(key.addr, addr, addr_len);
3875 key.prefix_len = prefix_len;
3876 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3877}
3878
3879static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003880mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003881 size_t addr_len, unsigned char prefix_len)
3882{
3883 struct mlxsw_sp_fib_node *fib_node;
3884
3885 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3886 if (!fib_node)
3887 return NULL;
3888
3889 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003890 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003891 memcpy(fib_node->key.addr, addr, addr_len);
3892 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003893
3894 return fib_node;
3895}
3896
3897static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3898{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003899 list_del(&fib_node->list);
3900 WARN_ON(!list_empty(&fib_node->entry_list));
3901 kfree(fib_node);
3902}
3903
3904static bool
3905mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3906 const struct mlxsw_sp_fib_entry *fib_entry)
3907{
3908 return list_first_entry(&fib_node->entry_list,
3909 struct mlxsw_sp_fib_entry, list) == fib_entry;
3910}
3911
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003912static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3913 struct mlxsw_sp_fib *fib,
3914 struct mlxsw_sp_fib_node *fib_node)
3915{
3916 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3917 struct mlxsw_sp_lpm_tree *lpm_tree;
3918 int err;
3919
3920 /* Since the tree is shared between all virtual routers we must
3921 * make sure it contains all the required prefix lengths. This
3922 * can be computed by either adding the new prefix length to the
3923 * existing prefix usage of a bound tree, or by aggregating the
3924 * prefix lengths across all virtual routers and adding the new
3925 * one as well.
3926 */
3927 if (fib->lpm_tree)
3928 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3929 &fib->lpm_tree->prefix_usage);
3930 else
3931 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3932 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3933
3934 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3935 fib->proto);
3936 if (IS_ERR(lpm_tree))
3937 return PTR_ERR(lpm_tree);
3938
3939 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3940 return 0;
3941
3942 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3943 if (err)
3944 return err;
3945
3946 return 0;
3947}
3948
3949static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3950 struct mlxsw_sp_fib *fib)
3951{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003952 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3953 return;
3954 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3955 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3956 fib->lpm_tree = NULL;
3957}
3958
Ido Schimmel9aecce12017-02-09 10:28:42 +01003959static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3960{
3961 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003962 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003963
3964 if (fib->prefix_ref_count[prefix_len]++ == 0)
3965 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3966}
3967
3968static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3969{
3970 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003971 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003972
3973 if (--fib->prefix_ref_count[prefix_len] == 0)
3974 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3975}
3976
Ido Schimmel76610eb2017-03-10 08:53:41 +01003977static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3978 struct mlxsw_sp_fib_node *fib_node,
3979 struct mlxsw_sp_fib *fib)
3980{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003981 int err;
3982
3983 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3984 if (err)
3985 return err;
3986 fib_node->fib = fib;
3987
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003988 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3989 if (err)
3990 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003991
3992 mlxsw_sp_fib_node_prefix_inc(fib_node);
3993
3994 return 0;
3995
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003996err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01003997 fib_node->fib = NULL;
3998 mlxsw_sp_fib_node_remove(fib, fib_node);
3999 return err;
4000}
4001
4002static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4003 struct mlxsw_sp_fib_node *fib_node)
4004{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004005 struct mlxsw_sp_fib *fib = fib_node->fib;
4006
4007 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004008 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004009 fib_node->fib = NULL;
4010 mlxsw_sp_fib_node_remove(fib, fib_node);
4011}
4012
Ido Schimmel9aecce12017-02-09 10:28:42 +01004013static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004014mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4015 size_t addr_len, unsigned char prefix_len,
4016 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004017{
4018 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004019 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004020 struct mlxsw_sp_vr *vr;
4021 int err;
4022
David Ahernf8fa9b42017-10-18 09:56:56 -07004023 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004024 if (IS_ERR(vr))
4025 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004026 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004027
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004028 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004029 if (fib_node)
4030 return fib_node;
4031
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004032 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004033 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004034 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004035 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004036 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004037
Ido Schimmel76610eb2017-03-10 08:53:41 +01004038 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4039 if (err)
4040 goto err_fib_node_init;
4041
Ido Schimmel9aecce12017-02-09 10:28:42 +01004042 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004043
Ido Schimmel76610eb2017-03-10 08:53:41 +01004044err_fib_node_init:
4045 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004046err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004047 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004048 return ERR_PTR(err);
4049}
4050
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004051static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4052 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004053{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004054 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004055
Ido Schimmel9aecce12017-02-09 10:28:42 +01004056 if (!list_empty(&fib_node->entry_list))
4057 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004058 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004059 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004060 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004061}
4062
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004063static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004064mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004065 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004066{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004067 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004068
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004069 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4070 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004071 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004072 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004073 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004074 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004075 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004076 if (fib4_entry->prio >= new4_entry->prio ||
4077 fib4_entry->tos < new4_entry->tos)
4078 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079 }
4080
4081 return NULL;
4082}
4083
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004084static int
4085mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4086 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004087{
4088 struct mlxsw_sp_fib_node *fib_node;
4089
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004090 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004091 return -EINVAL;
4092
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004093 fib_node = fib4_entry->common.fib_node;
4094 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4095 common.list) {
4096 if (fib4_entry->tb_id != new4_entry->tb_id ||
4097 fib4_entry->tos != new4_entry->tos ||
4098 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004099 break;
4100 }
4101
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004102 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004103 return 0;
4104}
4105
Ido Schimmel9aecce12017-02-09 10:28:42 +01004106static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004107mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004108 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004109{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004110 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004111 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004112
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004113 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004114
Ido Schimmel4283bce2017-02-09 10:28:43 +01004115 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004116 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4117 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004118 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004119
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004120 /* Insert the new entry before the replaced one, so that we can
4121 * later remove the replaced entry.
4122 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004123 if (fib4_entry) {
4124 list_add_tail(&new4_entry->common.list,
4125 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004126 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004127 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004128
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004129 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4130 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004131 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004132 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004133 }
4134
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004135 if (fib4_entry)
4136 list_add(&new4_entry->common.list,
4137 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004138 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004139 list_add(&new4_entry->common.list,
4140 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004141 }
4142
4143 return 0;
4144}
4145
4146static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004147mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004148{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004149 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004150}
4151
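/* Only the first entry on a FIB node's list is reflected in the
 * device. When a new entry becomes the first one, it is written on
 * top of the previously offloaded entry, so the prefix never stops
 * being routed by the device in between; the deletion path below
 * similarly promotes the next entry before removing the old one.
 */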
Ido Schimmel80c238f2017-07-18 10:10:29 +02004152static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4153 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004154{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004155 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4156
Ido Schimmel9aecce12017-02-09 10:28:42 +01004157 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4158 return 0;
4159
4160 /* To prevent packet loss, overwrite the previously offloaded
4161 * entry.
4162 */
4163 if (!list_is_singular(&fib_node->entry_list)) {
4164 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4165 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4166
4167 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4168 }
4169
4170 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4171}
4172
Ido Schimmel80c238f2017-07-18 10:10:29 +02004173static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4174 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004175{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004176 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4177
Ido Schimmel9aecce12017-02-09 10:28:42 +01004178 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4179 return;
4180
4181 /* Promote the next entry by overwriting the deleted entry */
4182 if (!list_is_singular(&fib_node->entry_list)) {
4183 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4184 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4185
4186 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4187 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4188 return;
4189 }
4190
4191 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4192}
4193
4194static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004195 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004196 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004197{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004198 int err;
4199
Ido Schimmel9efbee62017-07-18 10:10:28 +02004200 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004201 if (err)
4202 return err;
4203
Ido Schimmel80c238f2017-07-18 10:10:29 +02004204 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004205 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004206 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004207
Ido Schimmel9aecce12017-02-09 10:28:42 +01004208 return 0;
4209
Ido Schimmel80c238f2017-07-18 10:10:29 +02004210err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004211 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004212 return err;
4213}
4214
4215static void
4216mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004217 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004218{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004219 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004220 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004221
4222 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4223 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004224}
4225
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004226static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004227 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004228 bool replace)
4229{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004230 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4231 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004232
4233 if (!replace)
4234 return;
4235
4236 /* We inserted the new entry before replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004237 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004238
4239 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4240 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004241 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004242}
4243
Ido Schimmel9aecce12017-02-09 10:28:42 +01004244static int
4245mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004246 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004247 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004248{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004249 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004250 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004251 int err;
4252
Ido Schimmel9011b672017-05-16 19:38:25 +02004253 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004254 return 0;
4255
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004256 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4257 &fen_info->dst, sizeof(fen_info->dst),
4258 fen_info->dst_len,
4259 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004260 if (IS_ERR(fib_node)) {
4261 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4262 return PTR_ERR(fib_node);
4263 }
4264
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004265 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4266 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004267 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004268 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004269 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004270 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004271
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004273 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004274 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004275 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4276 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004277 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004278
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004279 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004280
Jiri Pirko61c503f2016-07-04 08:23:11 +02004281 return 0;
4282
Ido Schimmel9aecce12017-02-09 10:28:42 +01004283err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004284 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004285err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004286 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004287 return err;
4288}
4289
Jiri Pirko37956d72016-10-20 16:05:43 +02004290static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4291 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004292{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004293 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004294 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004295
Ido Schimmel9011b672017-05-16 19:38:25 +02004296 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004297 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004298
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004299 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4300 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004301 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004302 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004303
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004304 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4305 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004306 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004307}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004308
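/* IPv6 routes are handled much like the IPv4 routes above, except
 * that a single FIB entry may aggregate several sibling rt6_info
 * routes into one multipath entry, with one nexthop per route.
 */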
Ido Schimmel428b8512017-08-03 13:28:28 +02004309static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4310{
4311 /* Packets with link-local destination IP arriving at the router
4312 * are trapped to the CPU, so no need to program specific routes
4313 * for them.
4314 */
4315 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4316 return true;
4317
4318 /* Multicast routes aren't supported, so ignore them. Neighbour
4319 * Discovery packets are specifically trapped.
4320 */
4321 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4322 return true;
4323
4324 /* Cloned routes are irrelevant in the forwarding path. */
4325 if (rt->rt6i_flags & RTF_CACHE)
4326 return true;
4327
4328 return false;
4329}
4330
4331static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4332{
4333 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4334
4335 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4336 if (!mlxsw_sp_rt6)
4337 return ERR_PTR(-ENOMEM);
4338
4339 /* In case of a route replace, the replaced route is deleted with
4340 * no notification. Take a reference to prevent accessing freed
4341 * memory.
4342 */
4343 mlxsw_sp_rt6->rt = rt;
4344 rt6_hold(rt);
4345
4346 return mlxsw_sp_rt6;
4347}
4348
4349#if IS_ENABLED(CONFIG_IPV6)
4350static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4351{
4352 rt6_release(rt);
4353}
4354#else
4355static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4356{
4357}
4358#endif
4359
4360static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4361{
4362 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4363 kfree(mlxsw_sp_rt6);
4364}
4365
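/* Only gateway routes that were not created by address
 * autoconfiguration may be merged into a multipath entry.
 */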
4366static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4367{
4368 /* RTF_CACHE routes are ignored */
4369 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4370}
4371
4372static struct rt6_info *
4373mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4374{
4375 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4376 list)->rt;
4377}
4378
4379static struct mlxsw_sp_fib6_entry *
4380mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004381 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004382{
4383 struct mlxsw_sp_fib6_entry *fib6_entry;
4384
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004385 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004386 return NULL;
4387
4388 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4389 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4390
4391 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4392 * virtual router.
4393 */
4394 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4395 continue;
4396 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4397 break;
4398 if (rt->rt6i_metric < nrt->rt6i_metric)
4399 continue;
4400 if (rt->rt6i_metric == nrt->rt6i_metric &&
4401 mlxsw_sp_fib6_rt_can_mp(rt))
4402 return fib6_entry;
4403 if (rt->rt6i_metric > nrt->rt6i_metric)
4404 break;
4405 }
4406
4407 return NULL;
4408}
4409
4410static struct mlxsw_sp_rt6 *
4411mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4412 const struct rt6_info *rt)
4413{
4414 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4415
4416 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4417 if (mlxsw_sp_rt6->rt == rt)
4418 return mlxsw_sp_rt6;
4419 }
4420
4421 return NULL;
4422}
4423
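/* Check whether the route egresses through a netdevice that is an
 * IP-in-IP tunnel of a type recognized by the driver and, if so,
 * report that type via 'ret'.
 */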
Petr Machata8f28a302017-09-02 23:49:24 +02004424static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4425 const struct rt6_info *rt,
4426 enum mlxsw_sp_ipip_type *ret)
4427{
4428 return rt->dst.dev &&
4429 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4430}
4431
Petr Machata35225e42017-09-02 23:49:22 +02004432static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4433 struct mlxsw_sp_nexthop_group *nh_grp,
4434 struct mlxsw_sp_nexthop *nh,
4435 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004436{
Petr Machata8f28a302017-09-02 23:49:24 +02004437 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004438 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004439 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004440 struct mlxsw_sp_rif *rif;
4441 int err;
4442
Petr Machata8f28a302017-09-02 23:49:24 +02004443 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4444 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4445 MLXSW_SP_L3_PROTO_IPV6)) {
4446 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004447 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004448 if (err)
4449 return err;
4450 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4451 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004452 }
4453
Petr Machata35225e42017-09-02 23:49:22 +02004454 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004455 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4456 if (!rif)
4457 return 0;
4458 mlxsw_sp_nexthop_rif_init(nh, rif);
4459
4460 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4461 if (err)
4462 goto err_nexthop_neigh_init;
4463
4464 return 0;
4465
4466err_nexthop_neigh_init:
4467 mlxsw_sp_nexthop_rif_fini(nh);
4468 return err;
4469}
4470
Petr Machata35225e42017-09-02 23:49:22 +02004471static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4472 struct mlxsw_sp_nexthop *nh)
4473{
4474 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4475}
4476
4477static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4478 struct mlxsw_sp_nexthop_group *nh_grp,
4479 struct mlxsw_sp_nexthop *nh,
4480 const struct rt6_info *rt)
4481{
4482 struct net_device *dev = rt->dst.dev;
4483
4484 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004485 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004486 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004487 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004488
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004489 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4490
Petr Machata35225e42017-09-02 23:49:22 +02004491 if (!dev)
4492 return 0;
4493 nh->ifindex = dev->ifindex;
4494
4495 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4496}
4497
Ido Schimmel428b8512017-08-03 13:28:28 +02004498static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4499 struct mlxsw_sp_nexthop *nh)
4500{
Petr Machata35225e42017-09-02 23:49:22 +02004501 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004502 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004503 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004504}
4505
Petr Machataf6050ee2017-09-02 23:49:21 +02004506static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4507 const struct rt6_info *rt)
4508{
Petr Machata8f28a302017-09-02 23:49:24 +02004509 return rt->rt6i_flags & RTF_GATEWAY ||
4510 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004511}
4512
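/* Create a nexthop group covering all the routes currently linked to
 * the IPv6 FIB entry: one nexthop is initialized per rt6_info, the
 * group is inserted into the router's nexthop group table and then
 * refreshed. On error, already initialized nexthops are torn down in
 * reverse order.
 */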
Ido Schimmel428b8512017-08-03 13:28:28 +02004513static struct mlxsw_sp_nexthop_group *
4514mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4515 struct mlxsw_sp_fib6_entry *fib6_entry)
4516{
4517 struct mlxsw_sp_nexthop_group *nh_grp;
4518 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4519 struct mlxsw_sp_nexthop *nh;
4520 size_t alloc_size;
4521 int i = 0;
4522 int err;
4523
4524 alloc_size = sizeof(*nh_grp) +
4525 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4526 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4527 if (!nh_grp)
4528 return ERR_PTR(-ENOMEM);
4529 INIT_LIST_HEAD(&nh_grp->fib_list);
4530#if IS_ENABLED(CONFIG_IPV6)
4531 nh_grp->neigh_tbl = &nd_tbl;
4532#endif
4533 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4534 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004535 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004536 nh_grp->count = fib6_entry->nrt6;
4537 for (i = 0; i < nh_grp->count; i++) {
4538 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4539
4540 nh = &nh_grp->nexthops[i];
4541 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4542 if (err)
4543 goto err_nexthop6_init;
4544 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4545 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004546
4547 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4548 if (err)
4549 goto err_nexthop_group_insert;
4550
Ido Schimmel428b8512017-08-03 13:28:28 +02004551 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4552 return nh_grp;
4553
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004554err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004555err_nexthop6_init:
4556 for (i--; i >= 0; i--) {
4557 nh = &nh_grp->nexthops[i];
4558 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4559 }
4560 kfree(nh_grp);
4561 return ERR_PTR(err);
4562}
4563
4564static void
4565mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4566 struct mlxsw_sp_nexthop_group *nh_grp)
4567{
4568 struct mlxsw_sp_nexthop *nh;
4569 int i = nh_grp->count;
4570
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004571 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004572 for (i--; i >= 0; i--) {
4573 nh = &nh_grp->nexthops[i];
4574 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4575 }
4576 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4577 WARN_ON(nh_grp->adj_index_valid);
4578 kfree(nh_grp);
4579}
4580
4581static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4582 struct mlxsw_sp_fib6_entry *fib6_entry)
4583{
4584 struct mlxsw_sp_nexthop_group *nh_grp;
4585
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004586 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4587 if (!nh_grp) {
4588 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4589 if (IS_ERR(nh_grp))
4590 return PTR_ERR(nh_grp);
4591 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004592
4593 list_add_tail(&fib6_entry->common.nexthop_group_node,
4594 &nh_grp->fib_list);
4595 fib6_entry->common.nh_group = nh_grp;
4596
4597 return 0;
4598}
4599
4600static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4601 struct mlxsw_sp_fib_entry *fib_entry)
4602{
4603 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4604
4605 list_del(&fib_entry->nexthop_group_node);
4606 if (!list_empty(&nh_grp->fib_list))
4607 return;
4608 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4609}
4610
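/* When routes are added to or removed from a multipath entry, the
 * entry is moved over to a nexthop group that matches its new set of
 * routes: a matching group is looked up or created, the entry is
 * re-written to the device using it, and the old group is destroyed
 * if it is no longer used by any entry.
 */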
4611static int
4612mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4613 struct mlxsw_sp_fib6_entry *fib6_entry)
4614{
4615 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4616 int err;
4617
4618 fib6_entry->common.nh_group = NULL;
4619 list_del(&fib6_entry->common.nexthop_group_node);
4620
4621 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4622 if (err)
4623 goto err_nexthop6_group_get;
4624
4625 /* If this entry is offloaded, the adjacency index
4626 * currently associated with it in the device's table is that
4627 * of the old group. Start using the new one instead.
4628 */
4629 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4630 if (err)
4631 goto err_fib_node_entry_add;
4632
4633 if (list_empty(&old_nh_grp->fib_list))
4634 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4635
4636 return 0;
4637
4638err_fib_node_entry_add:
4639 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4640err_nexthop6_group_get:
4641 list_add_tail(&fib6_entry->common.nexthop_group_node,
4642 &old_nh_grp->fib_list);
4643 fib6_entry->common.nh_group = old_nh_grp;
4644 return err;
4645}
4646
4647static int
4648mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4649 struct mlxsw_sp_fib6_entry *fib6_entry,
4650 struct rt6_info *rt)
4651{
4652 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4653 int err;
4654
4655 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4656 if (IS_ERR(mlxsw_sp_rt6))
4657 return PTR_ERR(mlxsw_sp_rt6);
4658
4659 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4660 fib6_entry->nrt6++;
4661
4662 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4663 if (err)
4664 goto err_nexthop6_group_update;
4665
4666 return 0;
4667
4668err_nexthop6_group_update:
4669 fib6_entry->nrt6--;
4670 list_del(&mlxsw_sp_rt6->list);
4671 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4672 return err;
4673}
4674
4675static void
4676mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4677 struct mlxsw_sp_fib6_entry *fib6_entry,
4678 struct rt6_info *rt)
4679{
4680 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4681
4682 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4683 if (WARN_ON(!mlxsw_sp_rt6))
4684 return;
4685
4686 fib6_entry->nrt6--;
4687 list_del(&mlxsw_sp_rt6->list);
4688 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4689 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4690}
4691
Petr Machataf6050ee2017-09-02 23:49:21 +02004692static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4693 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004694 const struct rt6_info *rt)
4695{
4696 /* Packets hitting RTF_REJECT routes need to be discarded by the
4697 * stack. We can rely on their destination device not having a
4698 * RIF (it's the loopback device) and can thus use action type
4699 * local, which will cause them to be trapped with a lower
4700 * priority than packets that need to be locally received.
4701 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004702 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004703 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4704 else if (rt->rt6i_flags & RTF_REJECT)
4705 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004706 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004707 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4708 else
4709 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4710}
4711
4712static void
4713mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4714{
4715 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4716
4717 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4718 list) {
4719 fib6_entry->nrt6--;
4720 list_del(&mlxsw_sp_rt6->list);
4721 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4722 }
4723}
4724
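/* Create an IPv6 FIB entry for a single route. Sibling routes that
 * form a multipath route with it are appended later via
 * mlxsw_sp_fib6_entry_nexthop_add().
 */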
4725static struct mlxsw_sp_fib6_entry *
4726mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4727 struct mlxsw_sp_fib_node *fib_node,
4728 struct rt6_info *rt)
4729{
4730 struct mlxsw_sp_fib6_entry *fib6_entry;
4731 struct mlxsw_sp_fib_entry *fib_entry;
4732 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4733 int err;
4734
4735 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4736 if (!fib6_entry)
4737 return ERR_PTR(-ENOMEM);
4738 fib_entry = &fib6_entry->common;
4739
4740 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4741 if (IS_ERR(mlxsw_sp_rt6)) {
4742 err = PTR_ERR(mlxsw_sp_rt6);
4743 goto err_rt6_create;
4744 }
4745
Petr Machataf6050ee2017-09-02 23:49:21 +02004746 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004747
4748 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4749 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4750 fib6_entry->nrt6 = 1;
4751 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4752 if (err)
4753 goto err_nexthop6_group_get;
4754
4755 fib_entry->fib_node = fib_node;
4756
4757 return fib6_entry;
4758
4759err_nexthop6_group_get:
4760 list_del(&mlxsw_sp_rt6->list);
4761 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4762err_rt6_create:
4763 kfree(fib6_entry);
4764 return ERR_PTR(err);
4765}
4766
4767static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4768 struct mlxsw_sp_fib6_entry *fib6_entry)
4769{
4770 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4771 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4772 WARN_ON(fib6_entry->nrt6);
4773 kfree(fib6_entry);
4774}
4775
4776static struct mlxsw_sp_fib6_entry *
4777mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004778 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004779{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004780 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004781
4782 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4783 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4784
4785 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4786 continue;
4787 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4788 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004789 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4790 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4791 mlxsw_sp_fib6_rt_can_mp(nrt))
4792 return fib6_entry;
4793 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4794 fallback = fallback ?: fib6_entry;
4795 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004796 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004797 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004798 }
4799
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004800 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004801}
4802
4803static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004804mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4805 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004806{
4807 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4808 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4809 struct mlxsw_sp_fib6_entry *fib6_entry;
4810
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004811 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4812
4813 if (replace && WARN_ON(!fib6_entry))
4814 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004815
4816 if (fib6_entry) {
4817 list_add_tail(&new6_entry->common.list,
4818 &fib6_entry->common.list);
4819 } else {
4820 struct mlxsw_sp_fib6_entry *last;
4821
4822 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4823 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4824
4825 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4826 break;
4827 fib6_entry = last;
4828 }
4829
4830 if (fib6_entry)
4831 list_add(&new6_entry->common.list,
4832 &fib6_entry->common.list);
4833 else
4834 list_add(&new6_entry->common.list,
4835 &fib_node->entry_list);
4836 }
4837
4838 return 0;
4839}
4840
4841static void
4842mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4843{
4844 list_del(&fib6_entry->common.list);
4845}
4846
4847static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004848 struct mlxsw_sp_fib6_entry *fib6_entry,
4849 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004850{
4851 int err;
4852
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004853 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004854 if (err)
4855 return err;
4856
4857 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4858 if (err)
4859 goto err_fib_node_entry_add;
4860
4861 return 0;
4862
4863err_fib_node_entry_add:
4864 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4865 return err;
4866}
4867
4868static void
4869mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4870 struct mlxsw_sp_fib6_entry *fib6_entry)
4871{
4872 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4873 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4874}
4875
4876static struct mlxsw_sp_fib6_entry *
4877mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4878 const struct rt6_info *rt)
4879{
4880 struct mlxsw_sp_fib6_entry *fib6_entry;
4881 struct mlxsw_sp_fib_node *fib_node;
4882 struct mlxsw_sp_fib *fib;
4883 struct mlxsw_sp_vr *vr;
4884
4885 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4886 if (!vr)
4887 return NULL;
4888 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4889
4890 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4891 sizeof(rt->rt6i_dst.addr),
4892 rt->rt6i_dst.plen);
4893 if (!fib_node)
4894 return NULL;
4895
4896 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4897 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4898
4899 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4900 rt->rt6i_metric == iter_rt->rt6i_metric &&
4901 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4902 return fib6_entry;
4903 }
4904
4905 return NULL;
4906}
4907
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004908static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4909 struct mlxsw_sp_fib6_entry *fib6_entry,
4910 bool replace)
4911{
4912 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4913 struct mlxsw_sp_fib6_entry *replaced;
4914
4915 if (!replace)
4916 return;
4917
4918 replaced = list_next_entry(fib6_entry, common.list);
4919
4920 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4921 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4922 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4923}
4924
Ido Schimmel428b8512017-08-03 13:28:28 +02004925static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004926 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004927{
4928 struct mlxsw_sp_fib6_entry *fib6_entry;
4929 struct mlxsw_sp_fib_node *fib_node;
4930 int err;
4931
4932 if (mlxsw_sp->router->aborted)
4933 return 0;
4934
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004935 if (rt->rt6i_src.plen)
4936 return -EINVAL;
4937
Ido Schimmel428b8512017-08-03 13:28:28 +02004938 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4939 return 0;
4940
4941 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4942 &rt->rt6i_dst.addr,
4943 sizeof(rt->rt6i_dst.addr),
4944 rt->rt6i_dst.plen,
4945 MLXSW_SP_L3_PROTO_IPV6);
4946 if (IS_ERR(fib_node))
4947 return PTR_ERR(fib_node);
4948
4949 /* Before creating a new entry, try to append the route to an existing
4950 * multipath entry.
4951 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004952 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004953 if (fib6_entry) {
4954 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4955 if (err)
4956 goto err_fib6_entry_nexthop_add;
4957 return 0;
4958 }
4959
4960 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4961 if (IS_ERR(fib6_entry)) {
4962 err = PTR_ERR(fib6_entry);
4963 goto err_fib6_entry_create;
4964 }
4965
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004966 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004967 if (err)
4968 goto err_fib6_node_entry_link;
4969
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004970 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4971
Ido Schimmel428b8512017-08-03 13:28:28 +02004972 return 0;
4973
4974err_fib6_node_entry_link:
4975 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4976err_fib6_entry_create:
4977err_fib6_entry_nexthop_add:
4978 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4979 return err;
4980}
4981
4982static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4983 struct rt6_info *rt)
4984{
4985 struct mlxsw_sp_fib6_entry *fib6_entry;
4986 struct mlxsw_sp_fib_node *fib_node;
4987
4988 if (mlxsw_sp->router->aborted)
4989 return;
4990
4991 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4992 return;
4993
4994 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4995 if (WARN_ON(!fib6_entry))
4996 return;
4997
4998 /* If the route is part of a multipath entry, but is not the last
4999 * one to be removed, then only reduce its nexthop group.
5000 */
5001 if (!list_is_singular(&fib6_entry->rt6_list)) {
5002 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5003 return;
5004 }
5005
5006 fib_node = fib6_entry->common.fib_node;
5007
5008 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5009 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5010 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5011}
5012
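/* When FIB offload is aborted, each virtual router is bound to a
 * minimal LPM tree holding a single default route whose action is to
 * trap packets to the CPU ("ip2me"), so that all routing is performed
 * in software from that point on.
 */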
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005013static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5014 enum mlxsw_reg_ralxx_protocol proto,
5015 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005016{
5017 char ralta_pl[MLXSW_REG_RALTA_LEN];
5018 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005019 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005020
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005021 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005022 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5023 if (err)
5024 return err;
5025
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005026 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005027 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5028 if (err)
5029 return err;
5030
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005031 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005032 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005033 char raltb_pl[MLXSW_REG_RALTB_LEN];
5034 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005035
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005036 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005037 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5038 raltb_pl);
5039 if (err)
5040 return err;
5041
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005042 mlxsw_reg_ralue_pack(ralue_pl, proto,
5043 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005044 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5045 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5046 ralue_pl);
5047 if (err)
5048 return err;
5049 }
5050
5051 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005052}
5053
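/* Multicast (IPMR) routes and VIFs are programmed through the
 * per-virtual-router MR tables rather than through the LPM trees used
 * for unicast routes.
 */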
Yotam Gigid42b0962017-09-27 08:23:20 +02005054static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5055 struct mfc_entry_notifier_info *men_info,
5056 bool replace)
5057{
5058 struct mlxsw_sp_vr *vr;
5059
5060 if (mlxsw_sp->router->aborted)
5061 return 0;
5062
David Ahernf8fa9b42017-10-18 09:56:56 -07005063 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005064 if (IS_ERR(vr))
5065 return PTR_ERR(vr);
5066
5067 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5068}
5069
5070static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5071 struct mfc_entry_notifier_info *men_info)
5072{
5073 struct mlxsw_sp_vr *vr;
5074
5075 if (mlxsw_sp->router->aborted)
5076 return;
5077
5078 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5079 if (WARN_ON(!vr))
5080 return;
5081
5082 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5083 mlxsw_sp_vr_put(vr);
5084}
5085
5086static int
5087mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5088 struct vif_entry_notifier_info *ven_info)
5089{
5090 struct mlxsw_sp_rif *rif;
5091 struct mlxsw_sp_vr *vr;
5092
5093 if (mlxsw_sp->router->aborted)
5094 return 0;
5095
David Ahernf8fa9b42017-10-18 09:56:56 -07005096 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005097 if (IS_ERR(vr))
5098 return PTR_ERR(vr);
5099
5100 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5101 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5102 ven_info->vif_index,
5103 ven_info->vif_flags, rif);
5104}
5105
5106static void
5107mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5108 struct vif_entry_notifier_info *ven_info)
5109{
5110 struct mlxsw_sp_vr *vr;
5111
5112 if (mlxsw_sp->router->aborted)
5113 return;
5114
5115 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5116 if (WARN_ON(!vr))
5117 return;
5118
5119 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5120 mlxsw_sp_vr_put(vr);
5121}
5122
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005123static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5124{
5125 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5126 int err;
5127
5128 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5129 MLXSW_SP_LPM_TREE_MIN);
5130 if (err)
5131 return err;
5132
Yotam Gigid42b0962017-09-27 08:23:20 +02005133 /* The multicast router code does not need an abort trap, since by
5134 * default packets that do not match any route are trapped to the CPU.
5135 */
5136
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005137 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5138 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5139 MLXSW_SP_LPM_TREE_MIN + 1);
5140}
5141
Ido Schimmel9aecce12017-02-09 10:28:42 +01005142static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5143 struct mlxsw_sp_fib_node *fib_node)
5144{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005145 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005146
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005147 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5148 common.list) {
5149 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005150
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005151 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5152 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005153 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005154 /* Break when the entry list is empty and the node was freed.
5155 * Otherwise, we'll access freed memory in the next
5156 * iteration.
5157 */
5158 if (do_break)
5159 break;
5160 }
5161}
5162
Ido Schimmel428b8512017-08-03 13:28:28 +02005163static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5164 struct mlxsw_sp_fib_node *fib_node)
5165{
5166 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5167
5168 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5169 common.list) {
5170 bool do_break = &tmp->common.list == &fib_node->entry_list;
5171
5172 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5173 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5174 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5175 if (do_break)
5176 break;
5177 }
5178}
5179
Ido Schimmel9aecce12017-02-09 10:28:42 +01005180static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5181 struct mlxsw_sp_fib_node *fib_node)
5182{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005183 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005184 case MLXSW_SP_L3_PROTO_IPV4:
5185 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5186 break;
5187 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005188 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005189 break;
5190 }
5191}
5192
Ido Schimmel76610eb2017-03-10 08:53:41 +01005193static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5194 struct mlxsw_sp_vr *vr,
5195 enum mlxsw_sp_l3proto proto)
5196{
5197 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5198 struct mlxsw_sp_fib_node *fib_node, *tmp;
5199
5200 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5201 bool do_break = &tmp->list == &fib->node_list;
5202
5203 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5204 if (do_break)
5205 break;
5206 }
5207}
5208
Ido Schimmelac571de2016-11-14 11:26:32 +01005209static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005210{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005211 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005212
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005213 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005214 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005215
Ido Schimmel76610eb2017-03-10 08:53:41 +01005216 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005217 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005218
5219 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005220 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005221
5222 /* If the virtual router was only used for IPv4, then it is no
5223 * longer in use.
5224 */
5225 if (!mlxsw_sp_vr_is_used(vr))
5226 continue;
5227 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005228 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005229}
5230
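/* Abort FIB offload: flush everything that was offloaded so far,
 * mark the router as aborted so that subsequent FIB events are
 * ignored, and install the traps that divert all routed packets to
 * the CPU.
 */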
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005231static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005232{
5233 int err;
5234
Ido Schimmel9011b672017-05-16 19:38:25 +02005235 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005236 return;
5237 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005238 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005239 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005240 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5241 if (err)
5242 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5243}
5244
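/* FIB notifications are delivered in an atomic context. The notifier
 * info is therefore copied into a work item (taking references on the
 * objects it points to) and processed later under RTNL, where the
 * driver's data structures can be safely modified.
 */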
Ido Schimmel30572242016-12-03 16:45:01 +01005245struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005246 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005247 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005248 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005249 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005250 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005251 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005252 struct mfc_entry_notifier_info men_info;
5253 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005254 };
Ido Schimmel30572242016-12-03 16:45:01 +01005255 struct mlxsw_sp *mlxsw_sp;
5256 unsigned long event;
5257};
5258
Ido Schimmel66a57632017-08-03 13:28:26 +02005259static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005260{
Ido Schimmel30572242016-12-03 16:45:01 +01005261 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005262 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005263 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005264 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005265 int err;
5266
Ido Schimmel30572242016-12-03 16:45:01 +01005267 /* Protect internal structures from changes */
5268 rtnl_lock();
5269 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005270 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005271 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005272 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005273 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005274 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5275 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005276 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005277 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005278 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005279 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005280 break;
5281 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005282 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5283 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005284 break;
David Ahern1f279232017-10-27 17:37:14 -07005285 case FIB_EVENT_RULE_ADD:
5286 /* If we get here, a rule that we do not support was added,
5287 * so just trigger a FIB abort.
5288 */
5289 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005290 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005291 case FIB_EVENT_NH_ADD: /* fall through */
5292 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005293 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5294 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005295 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5296 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005297 }
Ido Schimmel30572242016-12-03 16:45:01 +01005298 rtnl_unlock();
5299 kfree(fib_work);
5300}
5301
Ido Schimmel66a57632017-08-03 13:28:26 +02005302static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5303{
Ido Schimmel583419f2017-08-03 13:28:27 +02005304 struct mlxsw_sp_fib_event_work *fib_work =
5305 container_of(work, struct mlxsw_sp_fib_event_work, work);
5306 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005307 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005308 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005309
5310 rtnl_lock();
5311 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005312 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005313 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005314 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005315 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005316 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005317 if (err)
5318 mlxsw_sp_router_fib_abort(mlxsw_sp);
5319 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5320 break;
5321 case FIB_EVENT_ENTRY_DEL:
5322 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5323 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5324 break;
David Ahern1f279232017-10-27 17:37:14 -07005325 case FIB_EVENT_RULE_ADD:
5326 /* If we get here, a rule that we do not support was added,
5327 * so just trigger a FIB abort.
5328 */
5329 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005330 break;
5331 }
5332 rtnl_unlock();
5333 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005334}
5335
Yotam Gigid42b0962017-09-27 08:23:20 +02005336static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5337{
5338 struct mlxsw_sp_fib_event_work *fib_work =
5339 container_of(work, struct mlxsw_sp_fib_event_work, work);
5340 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005341 bool replace;
5342 int err;
5343
5344 rtnl_lock();
5345 switch (fib_work->event) {
5346 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5347 case FIB_EVENT_ENTRY_ADD:
5348 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5349
5350 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5351 replace);
5352 if (err)
5353 mlxsw_sp_router_fib_abort(mlxsw_sp);
5354 ipmr_cache_put(fib_work->men_info.mfc);
5355 break;
5356 case FIB_EVENT_ENTRY_DEL:
5357 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5358 ipmr_cache_put(fib_work->men_info.mfc);
5359 break;
5360 case FIB_EVENT_VIF_ADD:
5361 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5362 &fib_work->ven_info);
5363 if (err)
5364 mlxsw_sp_router_fib_abort(mlxsw_sp);
5365 dev_put(fib_work->ven_info.dev);
5366 break;
5367 case FIB_EVENT_VIF_DEL:
5368 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5369 &fib_work->ven_info);
5370 dev_put(fib_work->ven_info.dev);
5371 break;
David Ahern1f279232017-10-27 17:37:14 -07005372 case FIB_EVENT_RULE_ADD:
5373 /* If we get here, a rule that we do not support was added,
5374 * so just trigger a FIB abort.
5375 */
5376 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005377 break;
5378 }
5379 rtnl_unlock();
5380 kfree(fib_work);
5381}
5382
Ido Schimmel66a57632017-08-03 13:28:26 +02005383static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5384 struct fib_notifier_info *info)
5385{
David Ahern3c75f9b2017-10-18 15:01:38 -07005386 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005387 struct fib_nh_notifier_info *fnh_info;
5388
Ido Schimmel66a57632017-08-03 13:28:26 +02005389 switch (fib_work->event) {
5390 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5391 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5392 case FIB_EVENT_ENTRY_ADD: /* fall through */
5393 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005394 fen_info = container_of(info, struct fib_entry_notifier_info,
5395 info);
5396 fib_work->fen_info = *fen_info;
5397 /* Take a reference on the fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005398 * freed while the work is queued. Release it afterwards.
5399 */
5400 fib_info_hold(fib_work->fen_info.fi);
5401 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005402 case FIB_EVENT_NH_ADD: /* fall through */
5403 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005404 fnh_info = container_of(info, struct fib_nh_notifier_info,
5405 info);
5406 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005407 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5408 break;
5409 }
5410}
5411
5412static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5413 struct fib_notifier_info *info)
5414{
David Ahern3c75f9b2017-10-18 15:01:38 -07005415 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005416
Ido Schimmel583419f2017-08-03 13:28:27 +02005417 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005418 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005419 case FIB_EVENT_ENTRY_ADD: /* fall through */
5420 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005421 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5422 info);
5423 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005424 rt6_hold(fib_work->fen6_info.rt);
5425 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005426 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005427}
5428
Yotam Gigid42b0962017-09-27 08:23:20 +02005429static void
5430mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5431 struct fib_notifier_info *info)
5432{
5433 switch (fib_work->event) {
5434 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5435 case FIB_EVENT_ENTRY_ADD: /* fall through */
5436 case FIB_EVENT_ENTRY_DEL:
5437 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5438 ipmr_cache_hold(fib_work->men_info.mfc);
5439 break;
5440 case FIB_EVENT_VIF_ADD: /* fall through */
5441 case FIB_EVENT_VIF_DEL:
5442 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5443 dev_hold(fib_work->ven_info.dev);
5444 break;
David Ahern1f279232017-10-27 17:37:14 -07005445 }
5446}
5447
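/* FIB rules are not offloaded. Default rules and l3mdev (VRF) rules
 * are tolerated; the addition of any other rule causes the caller to
 * abort FIB offload and fall back to software routing.
 */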
5448static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5449 struct fib_notifier_info *info,
5450 struct mlxsw_sp *mlxsw_sp)
5451{
5452 struct netlink_ext_ack *extack = info->extack;
5453 struct fib_rule_notifier_info *fr_info;
5454 struct fib_rule *rule;
5455 int err = 0;
5456
5457 /* nothing to do at the moment */
5458 if (event == FIB_EVENT_RULE_DEL)
5459 return 0;
5460
5461 if (mlxsw_sp->router->aborted)
5462 return 0;
5463
5464 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5465 rule = fr_info->rule;
5466
5467 switch (info->family) {
5468 case AF_INET:
5469 if (!fib4_rule_default(rule) && !rule->l3mdev)
5470 err = -1;
5471 break;
5472 case AF_INET6:
5473 if (!fib6_rule_default(rule) && !rule->l3mdev)
5474 err = -1;
5475 break;
5476 case RTNL_FAMILY_IPMR:
5477 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5478 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005479 break;
5480 }
David Ahern1f279232017-10-27 17:37:14 -07005481
5482 if (err < 0)
5483 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5484
5485 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005486}
5487
Ido Schimmel30572242016-12-03 16:45:01 +01005488/* Called with rcu_read_lock() */
5489static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5490 unsigned long event, void *ptr)
5491{
Ido Schimmel30572242016-12-03 16:45:01 +01005492 struct mlxsw_sp_fib_event_work *fib_work;
5493 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005494 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005495 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005496
Ido Schimmel8e29f972017-09-15 15:31:07 +02005497 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005498 (info->family != AF_INET && info->family != AF_INET6 &&
5499 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005500 return NOTIFY_DONE;
5501
David Ahern1f279232017-10-27 17:37:14 -07005502 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5503
5504 switch (event) {
5505 case FIB_EVENT_RULE_ADD: /* fall through */
5506 case FIB_EVENT_RULE_DEL:
5507 err = mlxsw_sp_router_fib_rule_event(event, info,
5508 router->mlxsw_sp);
5509 if (!err)
5510 return NOTIFY_DONE;
5511 }
5512
Ido Schimmel30572242016-12-03 16:45:01 +01005513 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5514 if (WARN_ON(!fib_work))
5515 return NOTIFY_BAD;
5516
Ido Schimmel7e39d112017-05-16 19:38:28 +02005517 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005518 fib_work->event = event;
5519
Ido Schimmel66a57632017-08-03 13:28:26 +02005520 switch (info->family) {
5521 case AF_INET:
5522 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5523 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005524 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005525 case AF_INET6:
5526 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5527 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005528 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005529 case RTNL_FAMILY_IPMR:
5530 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5531 mlxsw_sp_router_fibmr_event(fib_work, info);
5532 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005533 }
5534
Ido Schimmela0e47612017-02-06 16:20:10 +01005535 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005536
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005537 return NOTIFY_DONE;
5538}
5539
Ido Schimmel4724ba562017-03-10 08:53:39 +01005540static struct mlxsw_sp_rif *
5541mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5542 const struct net_device *dev)
5543{
5544 int i;
5545
5546 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005547 if (mlxsw_sp->router->rifs[i] &&
5548 mlxsw_sp->router->rifs[i]->dev == dev)
5549 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005550
5551 return NULL;
5552}
5553
5554static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5555{
5556 char ritr_pl[MLXSW_REG_RITR_LEN];
5557 int err;
5558
5559 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5560 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5561 if (WARN_ON_ONCE(err))
5562 return err;
5563
5564 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5565 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5566}
5567
5568static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005569 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005570{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005571 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5572 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5573 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005574}
5575
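/* Decide whether an address event should change the RIF configuration: on
 * NETDEV_UP a RIF is only created if none exists yet, and on NETDEV_DOWN the
 * RIF is only removed once both the IPv4 and IPv6 address lists of the netdev
 * are empty and the netdev is not an l3mdev (VRF) slave, whose RIF lifetime
 * is driven by the netdevice VRF events further below.
 */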
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005576static bool
5577mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5578 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005579{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005580 struct inet6_dev *inet6_dev;
5581 bool addr_list_empty = true;
5582 struct in_device *idev;
5583
Ido Schimmel4724ba562017-03-10 08:53:39 +01005584 switch (event) {
5585 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005586 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005587 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005588 idev = __in_dev_get_rtnl(dev);
5589 if (idev && idev->ifa_list)
5590 addr_list_empty = false;
5591
5592 inet6_dev = __in6_dev_get(dev);
5593 if (addr_list_empty && inet6_dev &&
5594 !list_empty(&inet6_dev->addr_list))
5595 addr_list_empty = false;
5596
5597 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005598 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005599 return true;
5600 /* It is possible we already removed the RIF ourselves
5601 * if it was assigned to a netdev that is now a bridge
5602 * or LAG slave.
5603 */
5604 return false;
5605 }
5606
5607 return false;
5608}
5609
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005610static enum mlxsw_sp_rif_type
5611mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5612 const struct net_device *dev)
5613{
5614 enum mlxsw_sp_fid_type type;
5615
Petr Machata6ddb7422017-09-02 23:49:19 +02005616 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5617 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5618
5619 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005620 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5621 type = MLXSW_SP_FID_TYPE_8021Q;
5622 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5623 type = MLXSW_SP_FID_TYPE_8021Q;
5624 else if (netif_is_bridge_master(dev))
5625 type = MLXSW_SP_FID_TYPE_8021D;
5626 else
5627 type = MLXSW_SP_FID_TYPE_RFID;
5628
5629 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5630}
5631
Ido Schimmelde5ed992017-06-04 16:53:40 +02005632static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005633{
5634 int i;
5635
Ido Schimmelde5ed992017-06-04 16:53:40 +02005636 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5637 if (!mlxsw_sp->router->rifs[i]) {
5638 *p_rif_index = i;
5639 return 0;
5640 }
5641 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005642
Ido Schimmelde5ed992017-06-04 16:53:40 +02005643 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005644}
5645
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005646static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5647 u16 vr_id,
5648 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005649{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005650 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005651
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005652 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005653 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005654 return NULL;
5655
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005656 INIT_LIST_HEAD(&rif->nexthop_list);
5657 INIT_LIST_HEAD(&rif->neigh_list);
5658 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5659 rif->mtu = l3_dev->mtu;
5660 rif->vr_id = vr_id;
5661 rif->dev = l3_dev;
5662 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005663
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005664 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005665}
5666
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005667struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5668 u16 rif_index)
5669{
5670 return mlxsw_sp->router->rifs[rif_index];
5671}
5672
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005673u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5674{
5675 return rif->rif_index;
5676}
5677
Petr Machata92107cf2017-09-02 23:49:28 +02005678u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5679{
5680 return lb_rif->common.rif_index;
5681}
5682
5683u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5684{
5685 return lb_rif->ul_vr_id;
5686}
5687
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005688int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5689{
5690 return rif->dev->ifindex;
5691}
5692
Yotam Gigi91e4d592017-09-19 10:00:19 +02005693const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5694{
5695 return rif->dev;
5696}
5697
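/* Common RIF creation path, shared by all RIF types. The sequence is:
 *   1. derive the RIF type from the netdev and pick the matching ops;
 *   2. get (or create) the virtual router bound to the netdev's FIB table;
 *   3. allocate a free RIF index and the type-specific RIF structure;
 *   4. get the backing FID (if the type has one), run the optional ->setup()
 *      hook and then ->configure() to program the device;
 *   5. register the RIF with the multicast router and allocate counters.
 * Errors unwind in reverse order through the labels at the bottom.
 */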
Ido Schimmel4724ba562017-03-10 08:53:39 +01005698static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005699mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005700 const struct mlxsw_sp_rif_params *params,
5701 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005702{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005703 u32 tb_id = l3mdev_fib_table(params->dev);
5704 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005705 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005706 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005707 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005708 struct mlxsw_sp_vr *vr;
5709 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005710 int err;
5711
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005712 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5713 ops = mlxsw_sp->router->rif_ops_arr[type];
5714
David Ahernf8fa9b42017-10-18 09:56:56 -07005715 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005716 if (IS_ERR(vr))
5717 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005718 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005719
Ido Schimmelde5ed992017-06-04 16:53:40 +02005720 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005721 if (err) {
5722 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005723 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005724 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005725
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005726 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005727 if (!rif) {
5728 err = -ENOMEM;
5729 goto err_rif_alloc;
5730 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005731 rif->mlxsw_sp = mlxsw_sp;
5732 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005733
Petr Machata010cadf2017-09-02 23:49:18 +02005734 if (ops->fid_get) {
5735 fid = ops->fid_get(rif);
5736 if (IS_ERR(fid)) {
5737 err = PTR_ERR(fid);
5738 goto err_fid_get;
5739 }
5740 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005741 }
5742
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005743 if (ops->setup)
5744 ops->setup(rif, params);
5745
5746 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005747 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005748 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005749
Yotam Gigid42b0962017-09-27 08:23:20 +02005750 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5751 if (err)
5752 goto err_mr_rif_add;
5753
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005754 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005755 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005756
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005757 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005758
Yotam Gigid42b0962017-09-27 08:23:20 +02005759err_mr_rif_add:
5760 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005761err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005762 if (fid)
5763 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005764err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005765 kfree(rif);
5766err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005767err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005768 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005769 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005770 return ERR_PTR(err);
5771}
5772
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005773void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005774{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005775 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5776 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005777 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005778 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005779
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005780 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005781 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005782
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005783 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005784 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005785 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005786 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005787 if (fid)
5788 /* Loopback RIFs are not associated with a FID. */
5789 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005790 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005791 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005792 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005793}
5794
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005795static void
5796mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5797 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5798{
5799 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5800
5801 params->vid = mlxsw_sp_port_vlan->vid;
5802 params->lag = mlxsw_sp_port->lagged;
5803 if (params->lag)
5804 params->lag_id = mlxsw_sp_port->lag_id;
5805 else
5806 params->system_port = mlxsw_sp_port->local_port;
5807}
5808
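/* Join a {port, VID} to the router. If the L3 netdev does not have a RIF yet,
 * a sub-port RIF is created for it. The port-VLAN is then mapped to the RIF's
 * rFID, learning is disabled and the STP state is forced to forwarding on
 * that VID, as the traffic is now routed rather than bridged. The leave path
 * below undoes these steps in reverse order.
 */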
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005809static int
Ido Schimmela1107482017-05-26 08:37:39 +02005810mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005811 struct net_device *l3_dev,
5812 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005813{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005814 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005815 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005816 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005817 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005818 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005819 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005820
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005821 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005822 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005823 struct mlxsw_sp_rif_params params = {
5824 .dev = l3_dev,
5825 };
5826
5827 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005828 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005829 if (IS_ERR(rif))
5830 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005831 }
5832
Ido Schimmela1107482017-05-26 08:37:39 +02005833	/* The FID was already created together with the RIF; just take a reference. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005834 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005835 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5836 if (err)
5837 goto err_fid_port_vid_map;
5838
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005839 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005840 if (err)
5841 goto err_port_vid_learning_set;
5842
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005843 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005844 BR_STATE_FORWARDING);
5845 if (err)
5846 goto err_port_vid_stp_set;
5847
Ido Schimmela1107482017-05-26 08:37:39 +02005848 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005849
Ido Schimmel4724ba562017-03-10 08:53:39 +01005850 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005851
5852err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005853 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005854err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005855 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5856err_fid_port_vid_map:
5857 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005858 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005859}
5860
Ido Schimmela1107482017-05-26 08:37:39 +02005861void
5862mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005863{
Ido Schimmelce95e152017-05-26 08:37:27 +02005864 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005865 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005866 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005867
Ido Schimmela1107482017-05-26 08:37:39 +02005868 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5869 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005870
Ido Schimmela1107482017-05-26 08:37:39 +02005871 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005872 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5873 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005874 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5875 /* If router port holds the last reference on the rFID, then the
5876 * associated Sub-port RIF will be destroyed.
5877 */
5878 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005879}
5880
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005881static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5882 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005883 unsigned long event, u16 vid,
5884 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005885{
5886 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005887 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005888
Ido Schimmelce95e152017-05-26 08:37:27 +02005889 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005890 if (WARN_ON(!mlxsw_sp_port_vlan))
5891 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005892
5893 switch (event) {
5894 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005895 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005896 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005897 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005898 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005899 break;
5900 }
5901
5902 return 0;
5903}
5904
5905static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005906 unsigned long event,
5907 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005908{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005909 if (netif_is_bridge_port(port_dev) ||
5910 netif_is_lag_port(port_dev) ||
5911 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005912 return 0;
5913
David Ahernf8fa9b42017-10-18 09:56:56 -07005914 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5915 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005916}
5917
5918static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5919 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005920 unsigned long event, u16 vid,
5921 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005922{
5923 struct net_device *port_dev;
5924 struct list_head *iter;
5925 int err;
5926
5927 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5928 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005929 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5930 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005931 event, vid,
5932 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005933 if (err)
5934 return err;
5935 }
5936 }
5937
5938 return 0;
5939}
5940
5941static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005942 unsigned long event,
5943 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005944{
5945 if (netif_is_bridge_port(lag_dev))
5946 return 0;
5947
David Ahernf8fa9b42017-10-18 09:56:56 -07005948 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5949 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005950}
5951
Ido Schimmel4724ba562017-03-10 08:53:39 +01005952static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005953 unsigned long event,
5954 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005955{
5956 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005957 struct mlxsw_sp_rif_params params = {
5958 .dev = l3_dev,
5959 };
Ido Schimmela1107482017-05-26 08:37:39 +02005960 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005961
5962 switch (event) {
5963 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07005964 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005965 if (IS_ERR(rif))
5966 return PTR_ERR(rif);
5967 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005968 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005969 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005970 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005971 break;
5972 }
5973
5974 return 0;
5975}
5976
5977static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005978 unsigned long event,
5979 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005980{
5981 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005982 u16 vid = vlan_dev_vlan_id(vlan_dev);
5983
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03005984 if (netif_is_bridge_port(vlan_dev))
5985 return 0;
5986
Ido Schimmel4724ba562017-03-10 08:53:39 +01005987 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005988 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005989 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005990 else if (netif_is_lag_master(real_dev))
5991 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07005992 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02005993 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07005994 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005995
5996 return 0;
5997}
5998
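/* Common helper shared by the inetaddr and inet6addr notifiers and by the
 * VRF join/leave path. The extack argument may be NULL when there is no
 * user-visible context to report to, e.g. when called from the deferred
 * inet6addr work item.
 */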
Ido Schimmelb1e45522017-04-30 19:47:14 +03005999static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006000 unsigned long event,
6001 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006002{
6003 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006004 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006005 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006006 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006007 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006008 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006009 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006010 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006011 else
6012 return 0;
6013}
6014
Ido Schimmel4724ba562017-03-10 08:53:39 +01006015int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6016 unsigned long event, void *ptr)
6017{
6018 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6019 struct net_device *dev = ifa->ifa_dev->dev;
6020 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006021 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006022 int err = 0;
6023
David Ahern89d5dd22017-10-18 09:56:55 -07006024 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6025 if (event == NETDEV_UP)
6026 goto out;
6027
6028 mlxsw_sp = mlxsw_sp_lower_get(dev);
6029 if (!mlxsw_sp)
6030 goto out;
6031
6032 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6033 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6034 goto out;
6035
David Ahernf8fa9b42017-10-18 09:56:56 -07006036 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006037out:
6038 return notifier_from_errno(err);
6039}
6040
6041int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6042 unsigned long event, void *ptr)
6043{
6044 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6045 struct net_device *dev = ivi->ivi_dev->dev;
6046 struct mlxsw_sp *mlxsw_sp;
6047 struct mlxsw_sp_rif *rif;
6048 int err = 0;
6049
Ido Schimmel4724ba562017-03-10 08:53:39 +01006050 mlxsw_sp = mlxsw_sp_lower_get(dev);
6051 if (!mlxsw_sp)
6052 goto out;
6053
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006054 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006055 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006056 goto out;
6057
David Ahernf8fa9b42017-10-18 09:56:56 -07006058 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006059out:
6060 return notifier_from_errno(err);
6061}
6062
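/* Unlike the IPv4 inetaddr notifier, the inet6addr notifier is called in
 * atomic context (under RCU), so the RIF update cannot be done in place.
 * The event is therefore deferred to a work item which takes rtnl_lock()
 * and then reuses the common __mlxsw_sp_inetaddr_event() helper.
 */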
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006063struct mlxsw_sp_inet6addr_event_work {
6064 struct work_struct work;
6065 struct net_device *dev;
6066 unsigned long event;
6067};
6068
6069static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6070{
6071 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6072 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6073 struct net_device *dev = inet6addr_work->dev;
6074 unsigned long event = inet6addr_work->event;
6075 struct mlxsw_sp *mlxsw_sp;
6076 struct mlxsw_sp_rif *rif;
6077
6078 rtnl_lock();
6079 mlxsw_sp = mlxsw_sp_lower_get(dev);
6080 if (!mlxsw_sp)
6081 goto out;
6082
6083 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6084 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6085 goto out;
6086
David Ahernf8fa9b42017-10-18 09:56:56 -07006087 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006088out:
6089 rtnl_unlock();
6090 dev_put(dev);
6091 kfree(inet6addr_work);
6092}
6093
6094/* Called with rcu_read_lock() */
6095int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6096 unsigned long event, void *ptr)
6097{
6098 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6099 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6100 struct net_device *dev = if6->idev->dev;
6101
David Ahern89d5dd22017-10-18 09:56:55 -07006102 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6103 if (event == NETDEV_UP)
6104 return NOTIFY_DONE;
6105
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006106 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6107 return NOTIFY_DONE;
6108
6109 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6110 if (!inet6addr_work)
6111 return NOTIFY_BAD;
6112
6113 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6114 inet6addr_work->dev = dev;
6115 inet6addr_work->event = event;
6116 dev_hold(dev);
6117 mlxsw_core_schedule_work(&inet6addr_work->work);
6118
6119 return NOTIFY_DONE;
6120}
6121
David Ahern89d5dd22017-10-18 09:56:55 -07006122int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6123 unsigned long event, void *ptr)
6124{
6125 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6126 struct net_device *dev = i6vi->i6vi_dev->dev;
6127 struct mlxsw_sp *mlxsw_sp;
6128 struct mlxsw_sp_rif *rif;
6129 int err = 0;
6130
6131 mlxsw_sp = mlxsw_sp_lower_get(dev);
6132 if (!mlxsw_sp)
6133 goto out;
6134
6135 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6136 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6137 goto out;
6138
David Ahernf8fa9b42017-10-18 09:56:56 -07006139 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006140out:
6141 return notifier_from_errno(err);
6142}
6143
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006144static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006145 const char *mac, int mtu)
6146{
6147 char ritr_pl[MLXSW_REG_RITR_LEN];
6148 int err;
6149
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006150 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6152 if (err)
6153 return err;
6154
6155 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6156 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6157 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6158 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6159}
6160
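/* Handle a MAC address or MTU change on a netdev that already has a RIF: the
 * FDB entry for the old router MAC is removed, the RIF is updated via the
 * RITR register, a new FDB entry is installed for the new MAC, and, if the
 * MTU changed, the multicast routing table is updated as well. On failure the
 * previous RIF settings are restored.
 */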
6161int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6162{
6163 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006164 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006165 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006166 int err;
6167
6168 mlxsw_sp = mlxsw_sp_lower_get(dev);
6169 if (!mlxsw_sp)
6170 return 0;
6171
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006172 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6173 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006174 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006175 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006176
Ido Schimmela1107482017-05-26 08:37:39 +02006177 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006178 if (err)
6179 return err;
6180
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006181 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6182 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006183 if (err)
6184 goto err_rif_edit;
6185
Ido Schimmela1107482017-05-26 08:37:39 +02006186 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006187 if (err)
6188 goto err_rif_fdb_op;
6189
Yotam Gigifd890fe2017-09-27 08:23:21 +02006190 if (rif->mtu != dev->mtu) {
6191 struct mlxsw_sp_vr *vr;
6192
6193		/* Unlike in unicast routing, a RIF cannot be shared between
6194		 * several multicast routing tables, so the MTU update is
6195		 * relevant only to the RIF's own mr_table instance.
6196 */
6197 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6198 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6199 }
6200
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006201 ether_addr_copy(rif->addr, dev->dev_addr);
6202 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006203
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006204 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006205
6206 return 0;
6207
6208err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006209 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006210err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006211 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006212 return err;
6213}
6214
Ido Schimmelb1e45522017-04-30 19:47:14 +03006215static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006216 struct net_device *l3_dev,
6217 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006218{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006219 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006220
Ido Schimmelb1e45522017-04-30 19:47:14 +03006221	/* If the netdev is already associated with a RIF, then it needs to be
6222	 * destroyed and a new one created with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006223 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006224 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6225 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006226 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006227
David Ahernf8fa9b42017-10-18 09:56:56 -07006228 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006229}
6230
Ido Schimmelb1e45522017-04-30 19:47:14 +03006231static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6232 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006233{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006234 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006235
Ido Schimmelb1e45522017-04-30 19:47:14 +03006236 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6237 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006238 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006239 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006240}
6241
Ido Schimmelb1e45522017-04-30 19:47:14 +03006242int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6243 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006244{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006245 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6246 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006247
Ido Schimmelb1e45522017-04-30 19:47:14 +03006248 if (!mlxsw_sp)
6249 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006250
Ido Schimmelb1e45522017-04-30 19:47:14 +03006251 switch (event) {
6252 case NETDEV_PRECHANGEUPPER:
6253 return 0;
6254 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006255 if (info->linking) {
6256 struct netlink_ext_ack *extack;
6257
6258 extack = netdev_notifier_info_to_extack(&info->info);
6259 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6260 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006261 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006262 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006263 break;
6264 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006265
Ido Schimmelb1e45522017-04-30 19:47:14 +03006266 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006267}
6268
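/* Per-type RIF operations. Each RIF type (sub-port, VLAN, FID and IPIP
 * loopback) supplies its own rif_size, an optional ->setup() that copies the
 * type-specific parameters, ->configure() / ->deconfigure() that program the
 * device, and an optional ->fid_get() that returns the FID backing the RIF.
 * mlxsw_sp_rif_create() and mlxsw_sp_rif_destroy() above drive these
 * callbacks.
 */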
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006269static struct mlxsw_sp_rif_subport *
6270mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006271{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006272 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006273}
6274
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006275static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6276 const struct mlxsw_sp_rif_params *params)
6277{
6278 struct mlxsw_sp_rif_subport *rif_subport;
6279
6280 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6281 rif_subport->vid = params->vid;
6282 rif_subport->lag = params->lag;
6283 if (params->lag)
6284 rif_subport->lag_id = params->lag_id;
6285 else
6286 rif_subport->system_port = params->system_port;
6287}
6288
6289static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6290{
6291 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6292 struct mlxsw_sp_rif_subport *rif_subport;
6293 char ritr_pl[MLXSW_REG_RITR_LEN];
6294
6295 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6296 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006297 rif->rif_index, rif->vr_id, rif->dev->mtu);
6298 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006299 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6300 rif_subport->lag ? rif_subport->lag_id :
6301 rif_subport->system_port,
6302 rif_subport->vid);
6303
6304 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6305}
6306
6307static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6308{
Petr Machata010cadf2017-09-02 23:49:18 +02006309 int err;
6310
6311 err = mlxsw_sp_rif_subport_op(rif, true);
6312 if (err)
6313 return err;
6314
6315 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6316 mlxsw_sp_fid_index(rif->fid), true);
6317 if (err)
6318 goto err_rif_fdb_op;
6319
6320 mlxsw_sp_fid_rif_set(rif->fid, rif);
6321 return 0;
6322
6323err_rif_fdb_op:
6324 mlxsw_sp_rif_subport_op(rif, false);
6325 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006326}
6327
6328static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6329{
Petr Machata010cadf2017-09-02 23:49:18 +02006330 struct mlxsw_sp_fid *fid = rif->fid;
6331
6332 mlxsw_sp_fid_rif_set(fid, NULL);
6333 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6334 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006335 mlxsw_sp_rif_subport_op(rif, false);
6336}
6337
6338static struct mlxsw_sp_fid *
6339mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6340{
6341 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6342}
6343
6344static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6345 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6346 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6347 .setup = mlxsw_sp_rif_subport_setup,
6348 .configure = mlxsw_sp_rif_subport_configure,
6349 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6350 .fid_get = mlxsw_sp_rif_subport_fid_get,
6351};
6352
6353static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6354 enum mlxsw_reg_ritr_if_type type,
6355 u16 vid_fid, bool enable)
6356{
6357 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6358 char ritr_pl[MLXSW_REG_RITR_LEN];
6359
6360 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006361 rif->dev->mtu);
6362 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006363 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6364
6365 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6366}
6367
Yotam Gigib35750f2017-10-09 11:15:33 +02006368u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006369{
6370 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6371}
6372
6373static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6374{
6375 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6376 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6377 int err;
6378
6379 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6380 if (err)
6381 return err;
6382
Ido Schimmel0d284812017-07-18 10:10:12 +02006383 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6384 mlxsw_sp_router_port(mlxsw_sp), true);
6385 if (err)
6386 goto err_fid_mc_flood_set;
6387
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006388 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6389 mlxsw_sp_router_port(mlxsw_sp), true);
6390 if (err)
6391 goto err_fid_bc_flood_set;
6392
Petr Machata010cadf2017-09-02 23:49:18 +02006393 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6394 mlxsw_sp_fid_index(rif->fid), true);
6395 if (err)
6396 goto err_rif_fdb_op;
6397
6398 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006399 return 0;
6400
Petr Machata010cadf2017-09-02 23:49:18 +02006401err_rif_fdb_op:
6402 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6403 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006404err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006405 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6406 mlxsw_sp_router_port(mlxsw_sp), false);
6407err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006408 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6409 return err;
6410}
6411
6412static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6413{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006414 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006415 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6416 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006417
Petr Machata010cadf2017-09-02 23:49:18 +02006418 mlxsw_sp_fid_rif_set(fid, NULL);
6419 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6420 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006421 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6422 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006423 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6424 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006425 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6426}
6427
6428static struct mlxsw_sp_fid *
6429mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6430{
6431 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6432
6433 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6434}
6435
6436static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6437 .type = MLXSW_SP_RIF_TYPE_VLAN,
6438 .rif_size = sizeof(struct mlxsw_sp_rif),
6439 .configure = mlxsw_sp_rif_vlan_configure,
6440 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6441 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6442};
6443
6444static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6445{
6446 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6447 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6448 int err;
6449
6450 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6451 true);
6452 if (err)
6453 return err;
6454
Ido Schimmel0d284812017-07-18 10:10:12 +02006455 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6456 mlxsw_sp_router_port(mlxsw_sp), true);
6457 if (err)
6458 goto err_fid_mc_flood_set;
6459
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006460 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6461 mlxsw_sp_router_port(mlxsw_sp), true);
6462 if (err)
6463 goto err_fid_bc_flood_set;
6464
Petr Machata010cadf2017-09-02 23:49:18 +02006465 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6466 mlxsw_sp_fid_index(rif->fid), true);
6467 if (err)
6468 goto err_rif_fdb_op;
6469
6470 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006471 return 0;
6472
Petr Machata010cadf2017-09-02 23:49:18 +02006473err_rif_fdb_op:
6474 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6475 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006476err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006477 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6478 mlxsw_sp_router_port(mlxsw_sp), false);
6479err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006480 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6481 return err;
6482}
6483
6484static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6485{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006486 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006487 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6488 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006489
Petr Machata010cadf2017-09-02 23:49:18 +02006490 mlxsw_sp_fid_rif_set(fid, NULL);
6491 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6492 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006493 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6494 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006495 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6496 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006497 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6498}
6499
6500static struct mlxsw_sp_fid *
6501mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6502{
6503 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6504}
6505
6506static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6507 .type = MLXSW_SP_RIF_TYPE_FID,
6508 .rif_size = sizeof(struct mlxsw_sp_rif),
6509 .configure = mlxsw_sp_rif_fid_configure,
6510 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6511 .fid_get = mlxsw_sp_rif_fid_fid_get,
6512};
6513
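/* IPIP loopback RIFs back IP-in-IP tunnel devices. The underlay lookup is
 * done in a separate virtual router (ul_vr), which is taken in ->configure()
 * and released in ->deconfigure(). Only an IPv4 underlay is currently
 * supported; an IPv6 underlay fails with -EAFNOSUPPORT.
 */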
Petr Machata6ddb7422017-09-02 23:49:19 +02006514static struct mlxsw_sp_rif_ipip_lb *
6515mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6516{
6517 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6518}
6519
6520static void
6521mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6522 const struct mlxsw_sp_rif_params *params)
6523{
6524 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6525 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6526
6527 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6528 common);
6529 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6530 rif_lb->lb_config = params_lb->lb_config;
6531}
6532
6533static int
6534mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6535 struct mlxsw_sp_vr *ul_vr, bool enable)
6536{
6537 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6538 struct mlxsw_sp_rif *rif = &lb_rif->common;
6539 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6540 char ritr_pl[MLXSW_REG_RITR_LEN];
6541 u32 saddr4;
6542
6543 switch (lb_cf.ul_protocol) {
6544 case MLXSW_SP_L3_PROTO_IPV4:
6545 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6546 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6547 rif->rif_index, rif->vr_id, rif->dev->mtu);
6548 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6549 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6550 ul_vr->id, saddr4, lb_cf.okey);
6551 break;
6552
6553 case MLXSW_SP_L3_PROTO_IPV6:
6554 return -EAFNOSUPPORT;
6555 }
6556
6557 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6558}
6559
6560static int
6561mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6562{
6563 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6564 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6565 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6566 struct mlxsw_sp_vr *ul_vr;
6567 int err;
6568
David Ahernf8fa9b42017-10-18 09:56:56 -07006569 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006570 if (IS_ERR(ul_vr))
6571 return PTR_ERR(ul_vr);
6572
6573 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6574 if (err)
6575 goto err_loopback_op;
6576
6577 lb_rif->ul_vr_id = ul_vr->id;
6578 ++ul_vr->rif_count;
6579 return 0;
6580
6581err_loopback_op:
6582 mlxsw_sp_vr_put(ul_vr);
6583 return err;
6584}
6585
6586static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6587{
6588 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6589 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6590 struct mlxsw_sp_vr *ul_vr;
6591
6592 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6593 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6594
6595 --ul_vr->rif_count;
6596 mlxsw_sp_vr_put(ul_vr);
6597}
6598
6599static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6600 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6601 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6602 .setup = mlxsw_sp_rif_ipip_lb_setup,
6603 .configure = mlxsw_sp_rif_ipip_lb_configure,
6604 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6605};
6606
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006607static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6608 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6609 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6610 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006611 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006612};
6613
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006614static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6615{
6616 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6617
6618 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6619 sizeof(struct mlxsw_sp_rif *),
6620 GFP_KERNEL);
6621 if (!mlxsw_sp->router->rifs)
6622 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006623
6624 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6625
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006626 return 0;
6627}
6628
6629static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6630{
6631 int i;
6632
6633 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6634 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6635
6636 kfree(mlxsw_sp->router->rifs);
6637}
6638
Petr Machatadcbda282017-10-20 09:16:16 +02006639static int
6640mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6641{
6642 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6643
6644 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6645 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6646}
6647
Petr Machata38ebc0f2017-09-02 23:49:17 +02006648static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6649{
6650 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006651 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006652 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006653}
6654
6655static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6656{
Petr Machata1012b9a2017-09-02 23:49:23 +02006657 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006658}
6659
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006660static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6661{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006662 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006663
6664 /* Flush pending FIB notifications and then flush the device's
6665 * table before requesting another dump. The FIB notification
6666 * block is unregistered, so no need to take RTNL.
6667 */
6668 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006669 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6670 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006671}
6672
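/* ECMP hash configuration. The fields fed into the device's multipath hash
 * are programmed through the RECR2 register with a random seed. For IPv4 the
 * selection mirrors the kernel's fib_multipath_hash_policy sysctl: layer 3
 * only by default, or layer 3 plus protocol and TCP/UDP ports when the L4
 * policy is selected. For IPv6 the addresses, flow label and next header are
 * always used. Without CONFIG_IP_ROUTE_MULTIPATH this is compiled out to a
 * stub.
 */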
Ido Schimmelaf658b62017-11-02 17:14:09 +01006673#ifdef CONFIG_IP_ROUTE_MULTIPATH
6674static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6675{
6676 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6677}
6678
6679static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6680{
6681 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6682}
6683
6684static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6685{
6686 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6687
6688 mlxsw_sp_mp_hash_header_set(recr2_pl,
6689 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6690 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6691 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6692 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6693 if (only_l3)
6694 return;
6695 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6696 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6697 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6698 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6699}
6700
6701static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6702{
6703 mlxsw_sp_mp_hash_header_set(recr2_pl,
6704 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6705 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6706 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6707 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6708 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6709 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6710}
6711
6712static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6713{
6714 char recr2_pl[MLXSW_REG_RECR2_LEN];
6715 u32 seed;
6716
6717 get_random_bytes(&seed, sizeof(seed));
6718 mlxsw_reg_recr2_pack(recr2_pl, seed);
6719 mlxsw_sp_mp4_hash_init(recr2_pl);
6720 mlxsw_sp_mp6_hash_init(recr2_pl);
6721
6722 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6723}
6724#else
6725static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6726{
6727 return 0;
6728}
6729#endif
6730
Ido Schimmel4724ba562017-03-10 08:53:39 +01006731static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6732{
6733 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6734 u64 max_rifs;
6735 int err;
6736
6737 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6738 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006739 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006740
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006741 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006742 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6743 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6744 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006745 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006746 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006747}
6748
6749static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6750{
6751 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006752
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006753 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006754 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006755}
6756
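/* Router initialization. Sub-systems are brought up in dependency order:
 * RIFs and IP-in-IP state first, then the nexthop and nexthop-group hash
 * tables, LPM trees, multicast router, virtual routers and neighbour
 * handling; the netevent, multipath-hash and FIB notifier setup come last,
 * presumably so that events only start arriving once the structures they
 * touch exist. Each err_* label unwinds exactly the steps that completed,
 * and mlxsw_sp_router_fini() tears everything down in reverse order.
 */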
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006757int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6758{
Ido Schimmel9011b672017-05-16 19:38:25 +02006759 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006760 int err;
6761
Ido Schimmel9011b672017-05-16 19:38:25 +02006762 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6763 if (!router)
6764 return -ENOMEM;
6765 mlxsw_sp->router = router;
6766 router->mlxsw_sp = mlxsw_sp;
6767
6768 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006769 err = __mlxsw_sp_router_init(mlxsw_sp);
6770 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006771 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006772
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006773 err = mlxsw_sp_rifs_init(mlxsw_sp);
6774 if (err)
6775 goto err_rifs_init;
6776
Petr Machata38ebc0f2017-09-02 23:49:17 +02006777 err = mlxsw_sp_ipips_init(mlxsw_sp);
6778 if (err)
6779 goto err_ipips_init;
6780
Ido Schimmel9011b672017-05-16 19:38:25 +02006781 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006782 &mlxsw_sp_nexthop_ht_params);
6783 if (err)
6784 goto err_nexthop_ht_init;
6785
Ido Schimmel9011b672017-05-16 19:38:25 +02006786 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006787 &mlxsw_sp_nexthop_group_ht_params);
6788 if (err)
6789 goto err_nexthop_group_ht_init;
6790
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006791 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006792 err = mlxsw_sp_lpm_init(mlxsw_sp);
6793 if (err)
6794 goto err_lpm_init;
6795
Yotam Gigid42b0962017-09-27 08:23:20 +02006796 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6797 if (err)
6798 goto err_mr_init;
6799
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006800 err = mlxsw_sp_vrs_init(mlxsw_sp);
6801 if (err)
6802 goto err_vrs_init;
6803
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006804 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006805 if (err)
6806 goto err_neigh_init;
6807
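	/* Netevents (e.g. neighbour updates) drive the reflection of the
	 * kernel's neighbour state into the device.
	 */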
Ido Schimmel48fac882017-11-02 17:14:06 +01006808 mlxsw_sp->router->netevent_nb.notifier_call =
6809 mlxsw_sp_router_netevent_event;
6810 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6811 if (err)
6812 goto err_register_netevent_notifier;
6813
Ido Schimmelaf658b62017-11-02 17:14:09 +01006814 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
6815 if (err)
6816 goto err_mp_hash_init;
6817
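	/* Register the FIB notifier last, so that route events are only
	 * delivered and replayed once the rest of the router is ready to
	 * handle them.
	 */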
Ido Schimmel7e39d112017-05-16 19:38:28 +02006818 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6819 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006820 mlxsw_sp_router_fib_dump_flush);
6821 if (err)
6822 goto err_register_fib_notifier;
6823
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006824 return 0;
6825
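	/* Error path: each label below undoes only the steps that completed
	 * before the failure, in reverse order of initialization.
	 */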
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006826err_register_fib_notifier:
Ido Schimmelaf658b62017-11-02 17:14:09 +01006827err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01006828 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6829err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006830 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006831err_neigh_init:
6832 mlxsw_sp_vrs_fini(mlxsw_sp);
6833err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02006834 mlxsw_sp_mr_fini(mlxsw_sp);
6835err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01006836 mlxsw_sp_lpm_fini(mlxsw_sp);
6837err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006838 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006839err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02006840 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006841err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02006842 mlxsw_sp_ipips_fini(mlxsw_sp);
6843err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006844 mlxsw_sp_rifs_fini(mlxsw_sp);
6845err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006846 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006847err_router_init:
6848 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006849 return err;
6850}
6851
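/* Tear down the router in reverse order of mlxsw_sp_router_init(). */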
6852void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6853{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006854 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01006855 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006856 mlxsw_sp_neigh_fini(mlxsw_sp);
6857 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02006858 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006859 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006860 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
6861 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006862 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006863 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006864 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02006865 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006866}