/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

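/* Usage sketch (not part of the driver; the caller is hypothetical): the
 * counter helpers above are meant to be paired roughly as follows, e.g. by a
 * dpipe dump routine:
 *
 *	u64 packets;
 *
 *	if (!mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					MLXSW_SP_RIF_COUNTER_EGRESS) &&
 *	    !mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					    MLXSW_SP_RIF_COUNTER_EGRESS,
 *					    &packets))
 *		pr_debug("RIF %u egress packets: %llu\n", rif->rif_index,
 *			 packets);
 *	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 */
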
static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

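/* Reference-counting sketch for the LPM tree helpers above (an assumed
 * caller, not driver code): a FIB asks for a tree matching its prefix usage,
 * takes a reference for as long as it stays bound to it, and drops the
 * reference when the binding goes away:
 *
 *	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
 *					 fib->proto);
 *	if (IS_ERR(lpm_tree))
 *		return PTR_ERR(lpm_tree);
 *	fib->lpm_tree = lpm_tree;
 *	mlxsw_sp_lpm_tree_hold(lpm_tree);
 *	...
 *	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
 */
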
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

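/* For illustration (not driver code): with the squashing above,
 * mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL) and mlxsw_sp_fix_tb_id(RT_TABLE_DEFAULT)
 * both return RT_TABLE_MAIN, so routes from the kernel's local, default and
 * main tables share one virtual router, while a VRF table ID (e.g. 100) is
 * returned unchanged and gets a virtual router of its own.
 */
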
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

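/* Illustration (an assumption, not driver code): for a tunnel created with a
 * bound underlay device, e.g. "ip tunnel add gre1 mode gre ... dev eth0",
 * tun->parms.link is eth0's ifindex, so the underlay table is the FIB table
 * of eth0's L3 master (its VRF), or RT_TABLE_MAIN when eth0 is not enslaved.
 * Without a bound underlay device, the tunnel netdevice's own L3 domain
 * decides, again falling back to RT_TABLE_MAIN.
 */
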
Petr Machata1012b9a2017-09-02 23:49:23 +0200956static struct mlxsw_sp_rif *
957mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -0700958 const struct mlxsw_sp_rif_params *params,
959 struct netlink_ext_ack *extack);
Petr Machata1012b9a2017-09-02 23:49:23 +0200960
961static struct mlxsw_sp_rif_ipip_lb *
962mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
963 enum mlxsw_sp_ipip_type ipipt,
Petr Machata7e75af62017-11-03 10:03:36 +0100964 struct net_device *ol_dev,
965 struct netlink_ext_ack *extack)
Petr Machata1012b9a2017-09-02 23:49:23 +0200966{
967 struct mlxsw_sp_rif_params_ipip_lb lb_params;
968 const struct mlxsw_sp_ipip_ops *ipip_ops;
969 struct mlxsw_sp_rif *rif;
970
971 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
972 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
973 .common.dev = ol_dev,
974 .common.lag = false,
975 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
976 };
977
Petr Machata7e75af62017-11-03 10:03:36 +0100978 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
Petr Machata1012b9a2017-09-02 23:49:23 +0200979 if (IS_ERR(rif))
980 return ERR_CAST(rif);
981 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
982}
983
984static struct mlxsw_sp_ipip_entry *
985mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
986 enum mlxsw_sp_ipip_type ipipt,
987 struct net_device *ol_dev)
988{
989 struct mlxsw_sp_ipip_entry *ipip_entry;
990 struct mlxsw_sp_ipip_entry *ret = NULL;
991
992 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
993 if (!ipip_entry)
994 return ERR_PTR(-ENOMEM);
995
996 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
Petr Machata7e75af62017-11-03 10:03:36 +0100997 ol_dev, NULL);
Petr Machata1012b9a2017-09-02 23:49:23 +0200998 if (IS_ERR(ipip_entry->ol_lb)) {
999 ret = ERR_CAST(ipip_entry->ol_lb);
1000 goto err_ol_ipip_lb_create;
1001 }
1002
1003 ipip_entry->ipipt = ipipt;
1004 ipip_entry->ol_dev = ol_dev;
1005
1006 return ipip_entry;
1007
1008err_ol_ipip_lb_create:
1009 kfree(ipip_entry);
1010 return ret;
1011}
1012
1013static void
Petr Machata4cccb732017-10-16 16:26:39 +02001014mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001015{
Petr Machata1012b9a2017-09-02 23:49:23 +02001016 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1017 kfree(ipip_entry);
1018}
1019
Petr Machata1012b9a2017-09-02 23:49:23 +02001020static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
1021 const union mlxsw_sp_l3addr *addr2)
1022{
1023 return !memcmp(addr1, addr2, sizeof(*addr1));
1024}
1025
1026static bool
1027mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1028 const enum mlxsw_sp_l3proto ul_proto,
1029 union mlxsw_sp_l3addr saddr,
1030 u32 ul_tb_id,
1031 struct mlxsw_sp_ipip_entry *ipip_entry)
1032{
1033 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1034 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1035 union mlxsw_sp_l3addr tun_saddr;
1036
1037 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1038 return false;
1039
1040 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1041 return tun_ul_tb_id == ul_tb_id &&
1042 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1043}
1044
Petr Machata4607f6d2017-09-02 23:49:25 +02001045static int
1046mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1047 struct mlxsw_sp_fib_entry *fib_entry,
1048 struct mlxsw_sp_ipip_entry *ipip_entry)
1049{
1050 u32 tunnel_index;
1051 int err;
1052
1053 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
1054 if (err)
1055 return err;
1056
1057 ipip_entry->decap_fib_entry = fib_entry;
1058 fib_entry->decap.ipip_entry = ipip_entry;
1059 fib_entry->decap.tunnel_index = tunnel_index;
1060 return 0;
1061}
1062
1063static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1064 struct mlxsw_sp_fib_entry *fib_entry)
1065{
1066 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1067 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1068 fib_entry->decap.ipip_entry = NULL;
1069 mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
1070}
1071
Petr Machata1cc38fb2017-09-02 23:49:26 +02001072static struct mlxsw_sp_fib_node *
1073mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1074 size_t addr_len, unsigned char prefix_len);
Petr Machata4607f6d2017-09-02 23:49:25 +02001075static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1076 struct mlxsw_sp_fib_entry *fib_entry);
1077
1078static void
1079mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1080 struct mlxsw_sp_ipip_entry *ipip_entry)
1081{
1082 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1083
1084 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1085 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1086
1087 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1088}
1089
Petr Machata1cc38fb2017-09-02 23:49:26 +02001090static void
1091mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1092 struct mlxsw_sp_ipip_entry *ipip_entry,
1093 struct mlxsw_sp_fib_entry *decap_fib_entry)
1094{
1095 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1096 ipip_entry))
1097 return;
1098 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1099
1100 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1101 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1102}
1103
1104/* Given an IPIP entry, find the corresponding decap route. */
1105static struct mlxsw_sp_fib_entry *
1106mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1107 struct mlxsw_sp_ipip_entry *ipip_entry)
1108{
1109 static struct mlxsw_sp_fib_node *fib_node;
1110 const struct mlxsw_sp_ipip_ops *ipip_ops;
1111 struct mlxsw_sp_fib_entry *fib_entry;
1112 unsigned char saddr_prefix_len;
1113 union mlxsw_sp_l3addr saddr;
1114 struct mlxsw_sp_fib *ul_fib;
1115 struct mlxsw_sp_vr *ul_vr;
1116 const void *saddrp;
1117 size_t saddr_len;
1118 u32 ul_tb_id;
1119 u32 saddr4;
1120
1121 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1122
1123 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1124 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1125 if (!ul_vr)
1126 return NULL;
1127
1128 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1129 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1130 ipip_entry->ol_dev);
1131
1132 switch (ipip_ops->ul_proto) {
1133 case MLXSW_SP_L3_PROTO_IPV4:
1134 saddr4 = be32_to_cpu(saddr.addr4);
1135 saddrp = &saddr4;
1136 saddr_len = 4;
1137 saddr_prefix_len = 32;
1138 break;
1139 case MLXSW_SP_L3_PROTO_IPV6:
1140 WARN_ON(1);
1141 return NULL;
1142 }
1143
1144 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1145 saddr_prefix_len);
1146 if (!fib_node || list_empty(&fib_node->entry_list))
1147 return NULL;
1148
1149 fib_entry = list_first_entry(&fib_node->entry_list,
1150 struct mlxsw_sp_fib_entry, list);
1151 if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1152 return NULL;
1153
1154 return fib_entry;
1155}
1156
Petr Machata1012b9a2017-09-02 23:49:23 +02001157static struct mlxsw_sp_ipip_entry *
Petr Machata4cccb732017-10-16 16:26:39 +02001158mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1159 enum mlxsw_sp_ipip_type ipipt,
1160 struct net_device *ol_dev)
Petr Machata1012b9a2017-09-02 23:49:23 +02001161{
1162 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1163 struct mlxsw_sp_router *router = mlxsw_sp->router;
1164 struct mlxsw_sp_ipip_entry *ipip_entry;
1165 enum mlxsw_sp_l3proto ul_proto;
1166 union mlxsw_sp_l3addr saddr;
1167
Petr Machata4cccb732017-10-16 16:26:39 +02001168 /* The configuration where several tunnels have the same local address
1169 * in the same underlay table needs special treatment in the HW. That is
1170 * currently not implemented in the driver.
1171 */
Petr Machata1012b9a2017-09-02 23:49:23 +02001172 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1173 ipip_list_node) {
Petr Machata1012b9a2017-09-02 23:49:23 +02001174 ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1175 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1176 if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1177 ul_tb_id, ipip_entry))
1178 return ERR_PTR(-EEXIST);
1179 }
1180
1181 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1182 if (IS_ERR(ipip_entry))
1183 return ipip_entry;
1184
1185 list_add_tail(&ipip_entry->ipip_list_node,
1186 &mlxsw_sp->router->ipip_list);
1187
Petr Machata1012b9a2017-09-02 23:49:23 +02001188 return ipip_entry;
1189}
1190
1191static void
Petr Machata4cccb732017-10-16 16:26:39 +02001192mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1193 struct mlxsw_sp_ipip_entry *ipip_entry)
Petr Machata1012b9a2017-09-02 23:49:23 +02001194{
Petr Machata4cccb732017-10-16 16:26:39 +02001195 list_del(&ipip_entry->ipip_list_node);
1196 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
Petr Machata1012b9a2017-09-02 23:49:23 +02001197}
1198
Petr Machata4607f6d2017-09-02 23:49:25 +02001199static bool
1200mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1201 const struct net_device *ul_dev,
1202 enum mlxsw_sp_l3proto ul_proto,
1203 union mlxsw_sp_l3addr ul_dip,
1204 struct mlxsw_sp_ipip_entry *ipip_entry)
1205{
1206 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1207 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1208 struct net_device *ipip_ul_dev;
1209
1210 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1211 return false;
1212
1213 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1214 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1215 ul_tb_id, ipip_entry) &&
1216 (!ipip_ul_dev || ipip_ul_dev == ul_dev);
1217}
1218
1219/* Given decap parameters, find the corresponding IPIP entry. */
1220static struct mlxsw_sp_ipip_entry *
1221mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1222 const struct net_device *ul_dev,
1223 enum mlxsw_sp_l3proto ul_proto,
1224 union mlxsw_sp_l3addr ul_dip)
1225{
1226 struct mlxsw_sp_ipip_entry *ipip_entry;
1227
1228 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1229 ipip_list_node)
1230 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1231 ul_proto, ul_dip,
1232 ipip_entry))
1233 return ipip_entry;
1234
1235 return NULL;
1236}
1237
Petr Machata6698c162017-10-16 16:26:36 +02001238static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1239 const struct net_device *dev,
1240 enum mlxsw_sp_ipip_type *p_type)
1241{
1242 struct mlxsw_sp_router *router = mlxsw_sp->router;
1243 const struct mlxsw_sp_ipip_ops *ipip_ops;
1244 enum mlxsw_sp_ipip_type ipipt;
1245
1246 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1247 ipip_ops = router->ipip_ops_arr[ipipt];
1248 if (dev->type == ipip_ops->dev_type) {
1249 if (p_type)
1250 *p_type = ipipt;
1251 return true;
1252 }
1253 }
1254 return false;
1255}
1256
Petr Machata796ec772017-11-03 10:03:29 +01001257bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1258 const struct net_device *dev)
Petr Machata00635872017-10-16 16:26:37 +02001259{
1260 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1261}
1262
1263static struct mlxsw_sp_ipip_entry *
1264mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1265 const struct net_device *ol_dev)
1266{
1267 struct mlxsw_sp_ipip_entry *ipip_entry;
1268
1269 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1270 ipip_list_node)
1271 if (ipip_entry->ol_dev == ol_dev)
1272 return ipip_entry;
1273
1274 return NULL;
1275}
1276
Petr Machatacafdb2a2017-11-03 10:03:30 +01001277static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1278 const struct net_device *ol_dev,
1279 enum mlxsw_sp_ipip_type ipipt)
1280{
1281 const struct mlxsw_sp_ipip_ops *ops
1282 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1283
1284 /* For deciding whether decap should be offloaded, we don't care about
1285 * overlay protocol, so ask whether either one is supported.
1286 */
1287 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1288 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1289}
1290
Petr Machata796ec772017-11-03 10:03:29 +01001291static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1292 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001293{
Petr Machata00635872017-10-16 16:26:37 +02001294 struct mlxsw_sp_ipip_entry *ipip_entry;
1295 enum mlxsw_sp_ipip_type ipipt;
1296
1297 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001298 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machata4cccb732017-10-16 16:26:39 +02001299 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1300 ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001301 if (IS_ERR(ipip_entry))
1302 return PTR_ERR(ipip_entry);
1303 }
1304
1305 return 0;
1306}
1307
Petr Machata796ec772017-11-03 10:03:29 +01001308static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1309 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001310{
1311 struct mlxsw_sp_ipip_entry *ipip_entry;
1312
1313 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1314 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001315 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001316}
1317
Petr Machata47518ca2017-11-03 10:03:35 +01001318static void
1319mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1320 struct mlxsw_sp_ipip_entry *ipip_entry)
1321{
1322 struct mlxsw_sp_fib_entry *decap_fib_entry;
1323
1324 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1325 if (decap_fib_entry)
1326 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1327 decap_fib_entry);
1328}
1329
Petr Machata6d4de442017-11-03 10:03:34 +01001330static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1331 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001332{
Petr Machata00635872017-10-16 16:26:37 +02001333 struct mlxsw_sp_ipip_entry *ipip_entry;
1334
1335 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001336 if (ipip_entry)
1337 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001338}
1339
Petr Machataa3fe1982017-11-03 10:03:33 +01001340static void
1341mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1342 struct mlxsw_sp_ipip_entry *ipip_entry)
1343{
1344 if (ipip_entry->decap_fib_entry)
1345 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1346}
1347
Petr Machata796ec772017-11-03 10:03:29 +01001348static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1349 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001350{
1351 struct mlxsw_sp_ipip_entry *ipip_entry;
1352
1353 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001354 if (ipip_entry)
1355 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001356}
1357
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001358static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1359 struct mlxsw_sp_rif *rif);
Petr Machata65a61212017-11-03 10:03:37 +01001360static int
1361mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1362 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001363 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001364 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001365{
Petr Machata65a61212017-11-03 10:03:37 +01001366 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1367 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001368
Petr Machata65a61212017-11-03 10:03:37 +01001369 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1370 ipip_entry->ipipt,
1371 ipip_entry->ol_dev,
1372 extack);
1373 if (IS_ERR(new_lb_rif))
1374 return PTR_ERR(new_lb_rif);
1375 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001376
1377 if (keep_encap) {
1378 list_splice_init(&old_lb_rif->common.nexthop_list,
1379 &new_lb_rif->common.nexthop_list);
1380 mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
1381 }
1382
Petr Machata65a61212017-11-03 10:03:37 +01001383 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001384
Petr Machata65a61212017-11-03 10:03:37 +01001385 return 0;
1386}
1387
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001388/**
1389 * Update the offload related to an IPIP entry. This always updates decap, and
1390 * in addition to that it also:
1391 * @recreate_loopback: recreates the associated loopback RIF
1392 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1393 * relevant when recreate_loopback is true.
1394 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1395 * is only relevant when recreate_loopback is false.
1396 */
Petr Machata65a61212017-11-03 10:03:37 +01001397int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1398 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001399 bool recreate_loopback,
1400 bool keep_encap,
1401 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001402 struct netlink_ext_ack *extack)
1403{
1404 int err;
1405
1406 /* RIFs can't be edited, so to update loopback, we need to destroy and
1407 * recreate it. That creates a window of opportunity where RALUE and
1408 * RATR registers end up referencing a RIF that's already gone. RATRs
1409 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001410 * of RALUE, demote the decap route back.
1411 */
1412 if (ipip_entry->decap_fib_entry)
1413 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1414
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001415 if (recreate_loopback) {
1416 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1417 keep_encap, extack);
1418 if (err)
1419 return err;
1420 } else if (update_nexthops) {
1421 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1422 &ipip_entry->ol_lb->common);
1423 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001424
Petr Machata65a61212017-11-03 10:03:37 +01001425 if (ipip_entry->ol_dev->flags & IFF_UP)
1426 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001427
1428 return 0;
1429}
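
/* For illustration: the decap offload is always refreshed by the helper
 * above, while the three booleans select how the encap side is handled.
 * The VRF-change handler below, for instance, calls it with
 * recreate_loopback=true, keep_encap=false and update_nexthops=false,
 * i.e. the loopback RIF is recreated without migrating its next hops.
 */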
1430
Petr Machata65a61212017-11-03 10:03:37 +01001431static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1432 struct net_device *ol_dev,
1433 struct netlink_ext_ack *extack)
1434{
1435 struct mlxsw_sp_ipip_entry *ipip_entry =
1436 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1437
1438 if (!ipip_entry)
1439 return 0;
1440 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001441 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001442}
1443
Petr Machata7e75af62017-11-03 10:03:36 +01001444int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1445 struct net_device *ol_dev,
1446 unsigned long event,
1447 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001448{
Petr Machata7e75af62017-11-03 10:03:36 +01001449 struct netdev_notifier_changeupper_info *chup;
1450 struct netlink_ext_ack *extack;
1451
Petr Machata00635872017-10-16 16:26:37 +02001452 switch (event) {
1453 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001454 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001455 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001456 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001457 return 0;
1458 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001459 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1460 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001461 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001462 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001463 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001464 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001465 chup = container_of(info, typeof(*chup), info);
1466 extack = info->extack;
1467 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001468 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001469 ol_dev,
1470 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001471 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001472 }
1473 return 0;
1474}
1475
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001476struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001477 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001478};
1479
1480struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001481 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001482 struct rhash_head ht_node;
1483 struct mlxsw_sp_neigh_key key;
1484 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001485 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001486 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001487 struct list_head nexthop_list; /* list of nexthops using
1488 * this neigh entry
1489 */
Yotam Gigib2157142016-07-05 11:27:51 +02001490 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001491 unsigned int counter_index;
1492 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001493};
1494
1495static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1496 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1497 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1498 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1499};
1500
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001501struct mlxsw_sp_neigh_entry *
1502mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1503 struct mlxsw_sp_neigh_entry *neigh_entry)
1504{
1505 if (!neigh_entry) {
1506 if (list_empty(&rif->neigh_list))
1507 return NULL;
1508 else
1509 return list_first_entry(&rif->neigh_list,
1510 typeof(*neigh_entry),
1511 rif_list_node);
1512 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001513 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001514 return NULL;
1515 return list_next_entry(neigh_entry, rif_list_node);
1516}
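
/* mlxsw_sp_rif_neigh_next() above is a simple cursor over a RIF's
 * neighbour list: pass NULL to get the first entry and the previous
 * entry to get the next one. A typical walk, sketched for illustration:
 *
 *	for (n = mlxsw_sp_rif_neigh_next(rif, NULL); n;
 *	     n = mlxsw_sp_rif_neigh_next(rif, n))
 *		...use n...;
 *
 * Callers are assumed to serialize against neighbour updates (handled
 * via RTNL elsewhere in this file).
 */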
1517
1518int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1519{
1520 return neigh_entry->key.n->tbl->family;
1521}
1522
1523unsigned char *
1524mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1525{
1526 return neigh_entry->ha;
1527}
1528
1529u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1530{
1531 struct neighbour *n;
1532
1533 n = neigh_entry->key.n;
1534 return ntohl(*((__be32 *) n->primary_key));
1535}
1536
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001537struct in6_addr *
1538mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1539{
1540 struct neighbour *n;
1541
1542 n = neigh_entry->key.n;
1543 return (struct in6_addr *) &n->primary_key;
1544}
1545
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001546int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1547 struct mlxsw_sp_neigh_entry *neigh_entry,
1548 u64 *p_counter)
1549{
1550 if (!neigh_entry->counter_valid)
1551 return -EINVAL;
1552
1553 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1554 p_counter, NULL);
1555}
1556
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001557static struct mlxsw_sp_neigh_entry *
1558mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1559 u16 rif)
1560{
1561 struct mlxsw_sp_neigh_entry *neigh_entry;
1562
1563 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1564 if (!neigh_entry)
1565 return NULL;
1566
1567 neigh_entry->key.n = n;
1568 neigh_entry->rif = rif;
1569 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1570
1571 return neigh_entry;
1572}
1573
1574static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1575{
1576 kfree(neigh_entry);
1577}
1578
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001579static int
1580mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1581 struct mlxsw_sp_neigh_entry *neigh_entry)
1582{
Ido Schimmel9011b672017-05-16 19:38:25 +02001583 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001584 &neigh_entry->ht_node,
1585 mlxsw_sp_neigh_ht_params);
1586}
1587
1588static void
1589mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1590 struct mlxsw_sp_neigh_entry *neigh_entry)
1591{
Ido Schimmel9011b672017-05-16 19:38:25 +02001592 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001593 &neigh_entry->ht_node,
1594 mlxsw_sp_neigh_ht_params);
1595}
1596
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001597static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001598mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1599 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001600{
1601 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001602 const char *table_name;
1603
1604 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1605 case AF_INET:
1606 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1607 break;
1608 case AF_INET6:
1609 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1610 break;
1611 default:
1612 WARN_ON(1);
1613 return false;
1614 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001615
1616 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001617 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001618}
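
/* In other words, a per-neighbour flow counter is only allocated when
 * counters were enabled (via devlink dpipe) on the host table matching
 * the neighbour's address family: MLXSW_SP_DPIPE_TABLE_NAME_HOST4 for
 * IPv4 and MLXSW_SP_DPIPE_TABLE_NAME_HOST6 for IPv6.
 */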
1619
1620static void
1621mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1622 struct mlxsw_sp_neigh_entry *neigh_entry)
1623{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001624 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001625 return;
1626
1627 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1628 return;
1629
1630 neigh_entry->counter_valid = true;
1631}
1632
1633static void
1634mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1635 struct mlxsw_sp_neigh_entry *neigh_entry)
1636{
1637 if (!neigh_entry->counter_valid)
1638 return;
1639 mlxsw_sp_flow_counter_free(mlxsw_sp,
1640 neigh_entry->counter_index);
1641 neigh_entry->counter_valid = false;
1642}
1643
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001644static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001645mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001646{
1647 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001648 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001649 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001650
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001651 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1652 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001653 return ERR_PTR(-EINVAL);
1654
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001655 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001656 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001657 return ERR_PTR(-ENOMEM);
1658
1659 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1660 if (err)
1661 goto err_neigh_entry_insert;
1662
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001663 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001664 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001665
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001666 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001667
1668err_neigh_entry_insert:
1669 mlxsw_sp_neigh_entry_free(neigh_entry);
1670 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001671}
1672
1673static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001674mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1675 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001676{
Ido Schimmel9665b742017-02-08 11:16:42 +01001677 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001678 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001679 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1680 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001681}
1682
1683static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001684mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001685{
Jiri Pirko33b13412016-11-10 12:31:04 +01001686 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001687
Jiri Pirko33b13412016-11-10 12:31:04 +01001688 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001689 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001690 &key, mlxsw_sp_neigh_ht_params);
1691}
1692
Yotam Gigic723c7352016-07-05 11:27:43 +02001693static void
1694mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1695{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001696 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001697
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001698#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001699 interval = min_t(unsigned long,
1700 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1701 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001702#else
1703 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1704#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001705 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001706}
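
/* A worked example with assumed values: if the IPv4 DELAY_PROBE_TIME is
 * 5 s and the IPv6 one is 3 s, the neighbour activity polling work ends
 * up being scheduled every 3000 ms - the smaller of the two intervals,
 * converted to milliseconds.
 */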
1707
1708static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1709 char *rauhtd_pl,
1710 int ent_index)
1711{
1712 struct net_device *dev;
1713 struct neighbour *n;
1714 __be32 dipn;
1715 u32 dip;
1716 u16 rif;
1717
1718 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1719
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001720 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001721 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1722 return;
1723 }
1724
1725 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001726 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001727 n = neigh_lookup(&arp_tbl, &dipn, dev);
1728 if (!n) {
1729 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1730 &dip);
1731 return;
1732 }
1733
1734 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1735 neigh_event_send(n, NULL);
1736 neigh_release(n);
1737}
1738
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001739#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001740static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1741 char *rauhtd_pl,
1742 int rec_index)
1743{
1744 struct net_device *dev;
1745 struct neighbour *n;
1746 struct in6_addr dip;
1747 u16 rif;
1748
1749 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1750 (char *) &dip);
1751
1752 if (!mlxsw_sp->router->rifs[rif]) {
1753 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1754 return;
1755 }
1756
1757 dev = mlxsw_sp->router->rifs[rif]->dev;
1758 n = neigh_lookup(&nd_tbl, &dip, dev);
1759 if (!n) {
1760 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1761 &dip);
1762 return;
1763 }
1764
1765 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1766 neigh_event_send(n, NULL);
1767 neigh_release(n);
1768}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001769#else
1770static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1771 char *rauhtd_pl,
1772 int rec_index)
1773{
1774}
1775#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001776
Yotam Gigic723c7352016-07-05 11:27:43 +02001777static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1778 char *rauhtd_pl,
1779 int rec_index)
1780{
1781 u8 num_entries;
1782 int i;
1783
1784 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1785 rec_index);
1786 /* Hardware starts counting at 0, so add 1. */
1787 num_entries++;
1788
1789 /* Each record consists of several neighbour entries. */
1790 for (i = 0; i < num_entries; i++) {
1791 int ent_index;
1792
1793 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1794 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1795 ent_index);
1796 }
1797
1798}
1799
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001800static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1801 char *rauhtd_pl,
1802 int rec_index)
1803{
1804 /* One record contains one entry. */
1805 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1806 rec_index);
1807}
1808
Yotam Gigic723c7352016-07-05 11:27:43 +02001809static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1810 char *rauhtd_pl, int rec_index)
1811{
1812 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1813 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1814 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1815 rec_index);
1816 break;
1817 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001818 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1819 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001820 break;
1821 }
1822}
1823
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001824static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1825{
1826 u8 num_rec, last_rec_index, num_entries;
1827
1828 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1829 last_rec_index = num_rec - 1;
1830
1831 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1832 return false;
1833 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1834 MLXSW_REG_RAUHTD_TYPE_IPV6)
1835 return true;
1836
1837 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1838 last_rec_index);
1839 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1840 return true;
1841 return false;
1842}
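
/* Put differently, the dump loop treats the response as exhausted unless
 * it carries the maximum number of records and the last record could not
 * hold any further entries - which is always true for an IPv6 record
 * (one entry per record) and, for an IPv4 record, only when all of its
 * entry slots are in use.
 */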
1843
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001844static int
1845__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1846 char *rauhtd_pl,
1847 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001848{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001849 int i, num_rec;
1850 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001851
1852 /* Make sure the neighbour's netdev isn't removed in the
1853 * process.
1854 */
1855 rtnl_lock();
1856 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001857 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001858 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1859 rauhtd_pl);
1860 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001861 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001862 break;
1863 }
1864 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1865 for (i = 0; i < num_rec; i++)
1866 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1867 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001868 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001869 rtnl_unlock();
1870
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001871 return err;
1872}
1873
1874static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1875{
1876 enum mlxsw_reg_rauhtd_type type;
1877 char *rauhtd_pl;
1878 int err;
1879
1880 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1881 if (!rauhtd_pl)
1882 return -ENOMEM;
1883
1884 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1885 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1886 if (err)
1887 goto out;
1888
1889 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1890 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1891out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001892 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001893 return err;
1894}
1895
1896static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1897{
1898 struct mlxsw_sp_neigh_entry *neigh_entry;
1899
 1900 /* Take the RTNL mutex here to prevent the lists from changing */
1901 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001902 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001903 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001904 /* If this neigh has nexthops, make the kernel think this neigh
1905 * is active regardless of the traffic.
1906 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001907 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001908 rtnl_unlock();
1909}
1910
1911static void
1912mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1913{
Ido Schimmel9011b672017-05-16 19:38:25 +02001914 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001915
Ido Schimmel9011b672017-05-16 19:38:25 +02001916 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001917 msecs_to_jiffies(interval));
1918}
1919
1920static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1921{
Ido Schimmel9011b672017-05-16 19:38:25 +02001922 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001923 int err;
1924
Ido Schimmel9011b672017-05-16 19:38:25 +02001925 router = container_of(work, struct mlxsw_sp_router,
1926 neighs_update.dw.work);
1927 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001928 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001929 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001930
Ido Schimmel9011b672017-05-16 19:38:25 +02001931 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001932
Ido Schimmel9011b672017-05-16 19:38:25 +02001933 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001934}
1935
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001936static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1937{
1938 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001939 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001940
Ido Schimmel9011b672017-05-16 19:38:25 +02001941 router = container_of(work, struct mlxsw_sp_router,
1942 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001943 /* Iterate over the nexthop neighbours and probe the unresolved
 1944 * ones. This solves a chicken-and-egg problem: a nexthop is not
 1945 * offloaded until its neighbour is resolved, but the neighbour may
 1946 * never be resolved if traffic is already flowing in hardware via a
 1947 * different nexthop.
 1948 *
 1949 * Take the RTNL mutex here to prevent the lists from changing.
 1950 */
1951 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001952 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001953 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001954 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001955 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001956 rtnl_unlock();
1957
Ido Schimmel9011b672017-05-16 19:38:25 +02001958 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001959 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1960}
1961
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001962static void
1963mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1964 struct mlxsw_sp_neigh_entry *neigh_entry,
1965 bool removing);
1966
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001967static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001968{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001969 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1970 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1971}
1972
1973static void
1974mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1975 struct mlxsw_sp_neigh_entry *neigh_entry,
1976 enum mlxsw_reg_rauht_op op)
1977{
Jiri Pirko33b13412016-11-10 12:31:04 +01001978 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001979 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001980 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001981
1982 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1983 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001984 if (neigh_entry->counter_valid)
1985 mlxsw_reg_rauht_pack_counter(rauht_pl,
1986 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001987 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1988}
1989
1990static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001991mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1992 struct mlxsw_sp_neigh_entry *neigh_entry,
1993 enum mlxsw_reg_rauht_op op)
1994{
1995 struct neighbour *n = neigh_entry->key.n;
1996 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1997 const char *dip = n->primary_key;
1998
1999 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2000 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002001 if (neigh_entry->counter_valid)
2002 mlxsw_reg_rauht_pack_counter(rauht_pl,
2003 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002004 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2005}
2006
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002007bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002008{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002009 struct neighbour *n = neigh_entry->key.n;
2010
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002011 /* Packets with a link-local destination address are trapped
2012 * after LPM lookup and never reach the neighbour table, so
2013 * there is no need to program such neighbours to the device.
2014 */
2015 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2016 IPV6_ADDR_LINKLOCAL)
2017 return true;
2018 return false;
2019}
2020
2021static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002022mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2023 struct mlxsw_sp_neigh_entry *neigh_entry,
2024 bool adding)
2025{
2026 if (!adding && !neigh_entry->connected)
2027 return;
2028 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002029 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002030 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2031 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002032 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002033 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002034 return;
2035 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2036 mlxsw_sp_rauht_op(adding));
2037 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002038 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002039 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002040}
2041
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002042void
2043mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2044 struct mlxsw_sp_neigh_entry *neigh_entry,
2045 bool adding)
2046{
2047 if (adding)
2048 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2049 else
2050 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2051 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2052}
2053
Ido Schimmelceb88812017-11-02 17:14:07 +01002054struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002055 struct work_struct work;
2056 struct mlxsw_sp *mlxsw_sp;
2057 struct neighbour *n;
2058};
2059
2060static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2061{
Ido Schimmelceb88812017-11-02 17:14:07 +01002062 struct mlxsw_sp_netevent_work *net_work =
2063 container_of(work, struct mlxsw_sp_netevent_work, work);
2064 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002065 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002066 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002067 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002068 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002069 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002070
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002071 /* If these parameters are changed after we release the lock,
2072 * then we are guaranteed to receive another event letting us
2073 * know about it.
2074 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002075 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002076 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002077 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002078 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002079 read_unlock_bh(&n->lock);
2080
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002081 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002082 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002083 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2084 if (!entry_connected && !neigh_entry)
2085 goto out;
2086 if (!neigh_entry) {
2087 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2088 if (IS_ERR(neigh_entry))
2089 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002090 }
2091
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002092 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2093 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2094 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2095
2096 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2097 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2098
2099out:
2100 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002101 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002102 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002103}
2104
Ido Schimmel28678f02017-11-02 17:14:10 +01002105static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2106
2107static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2108{
2109 struct mlxsw_sp_netevent_work *net_work =
2110 container_of(work, struct mlxsw_sp_netevent_work, work);
2111 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2112
2113 mlxsw_sp_mp_hash_init(mlxsw_sp);
2114 kfree(net_work);
2115}
2116
2117static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002118 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002119{
Ido Schimmelceb88812017-11-02 17:14:07 +01002120 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002121 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002122 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002123 struct mlxsw_sp *mlxsw_sp;
2124 unsigned long interval;
2125 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002126 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002127 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002128
2129 switch (event) {
2130 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2131 p = ptr;
2132
2133 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002134 if (!p->dev || (p->tbl->family != AF_INET &&
2135 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002136 return NOTIFY_DONE;
2137
2138 /* We are in atomic context and can't take RTNL mutex,
2139 * so use RCU variant to walk the device chain.
2140 */
2141 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2142 if (!mlxsw_sp_port)
2143 return NOTIFY_DONE;
2144
2145 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2146 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002147 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002148
2149 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2150 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002151 case NETEVENT_NEIGH_UPDATE:
2152 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002153
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002154 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002155 return NOTIFY_DONE;
2156
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002157 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002158 if (!mlxsw_sp_port)
2159 return NOTIFY_DONE;
2160
Ido Schimmelceb88812017-11-02 17:14:07 +01002161 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2162 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002163 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002164 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002165 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002166
Ido Schimmelceb88812017-11-02 17:14:07 +01002167 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2168 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2169 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002170
2171 /* Take a reference to ensure the neighbour won't be
 2172 * destroyed until we drop the reference in the delayed
2173 * work.
2174 */
2175 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002176 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002177 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002178 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002179 case NETEVENT_MULTIPATH_HASH_UPDATE:
2180 net = ptr;
2181
2182 if (!net_eq(net, &init_net))
2183 return NOTIFY_DONE;
2184
2185 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2186 if (!net_work)
2187 return NOTIFY_BAD;
2188
2189 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2190 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2191 net_work->mlxsw_sp = router->mlxsw_sp;
2192 mlxsw_core_schedule_work(&net_work->work);
2193 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002194 }
2195
2196 return NOTIFY_DONE;
2197}
2198
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002199static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2200{
Yotam Gigic723c7352016-07-05 11:27:43 +02002201 int err;
2202
Ido Schimmel9011b672017-05-16 19:38:25 +02002203 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002204 &mlxsw_sp_neigh_ht_params);
2205 if (err)
2206 return err;
2207
2208 /* Initialize the polling interval according to the default
2209 * table.
2210 */
2211 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2212
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002213 /* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002214 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002215 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002216 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002217 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002218 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2219 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002220 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002221}
2222
2223static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2224{
Ido Schimmel9011b672017-05-16 19:38:25 +02002225 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2226 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2227 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002228}
2229
Ido Schimmel9665b742017-02-08 11:16:42 +01002230static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002231 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002232{
2233 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2234
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002235 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002236 rif_list_node) {
2237 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002238 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002239 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002240}
2241
Petr Machata35225e42017-09-02 23:49:22 +02002242enum mlxsw_sp_nexthop_type {
2243 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002244 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002245};
2246
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002247struct mlxsw_sp_nexthop_key {
2248 struct fib_nh *fib_nh;
2249};
2250
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002251struct mlxsw_sp_nexthop {
2252 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002253 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002254 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002255 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2256 * this belongs to
2257 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002258 struct rhash_head ht_node;
2259 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002260 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002261 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002262 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002263 int norm_nh_weight;
2264 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002265 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002266 u8 should_offload:1, /* set indicates this neigh is connected and
2267 * should be put to KVD linear area of this group.
2268 */
2269 offloaded:1, /* set in case the neigh is actually put into
2270 * KVD linear area of this group.
2271 */
2272 update:1; /* set indicates that MAC of this neigh should be
2273 * updated in HW
2274 */
Petr Machata35225e42017-09-02 23:49:22 +02002275 enum mlxsw_sp_nexthop_type type;
2276 union {
2277 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002278 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002279 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002280 unsigned int counter_index;
2281 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002282};
2283
2284struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002285 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002286 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002287 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002288 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002289 u8 adj_index_valid:1,
2290 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002291 u32 adj_index;
2292 u16 ecmp_size;
2293 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002294 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002295 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002296#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002297};
2298
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002299void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2300 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002301{
2302 struct devlink *devlink;
2303
2304 devlink = priv_to_devlink(mlxsw_sp->core);
2305 if (!devlink_dpipe_table_counter_enabled(devlink,
2306 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2307 return;
2308
2309 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2310 return;
2311
2312 nh->counter_valid = true;
2313}
2314
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002315void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2316 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002317{
2318 if (!nh->counter_valid)
2319 return;
2320 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2321 nh->counter_valid = false;
2322}
2323
2324int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2325 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2326{
2327 if (!nh->counter_valid)
2328 return -EINVAL;
2329
2330 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2331 p_counter, NULL);
2332}
2333
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002334struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2335 struct mlxsw_sp_nexthop *nh)
2336{
2337 if (!nh) {
2338 if (list_empty(&router->nexthop_list))
2339 return NULL;
2340 else
2341 return list_first_entry(&router->nexthop_list,
2342 typeof(*nh), router_list_node);
2343 }
2344 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2345 return NULL;
2346 return list_next_entry(nh, router_list_node);
2347}
2348
2349bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2350{
2351 return nh->offloaded;
2352}
2353
2354unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2355{
2356 if (!nh->offloaded)
2357 return NULL;
2358 return nh->neigh_entry->ha;
2359}
2360
2361int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002362 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002363{
2364 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2365 u32 adj_hash_index = 0;
2366 int i;
2367
2368 if (!nh->offloaded || !nh_grp->adj_index_valid)
2369 return -EINVAL;
2370
2371 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002372 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002373
2374 for (i = 0; i < nh_grp->count; i++) {
2375 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2376
2377 if (nh_iter == nh)
2378 break;
2379 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002380 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002381 }
2382
2383 *p_adj_hash_index = adj_hash_index;
2384 return 0;
2385}
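
/* For example (hypothetical numbers): in a group based at adjacency
 * index 100 whose two offloaded nexthops occupy 2 and 3 adjacency
 * entries respectively, this returns adj_index 100, adj_size equal to
 * the group's ecmp_size, and a hash index of 0 for the first nexthop
 * and 2 for the second one.
 */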
2386
2387struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2388{
2389 return nh->rif;
2390}
2391
2392bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2393{
2394 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2395 int i;
2396
2397 for (i = 0; i < nh_grp->count; i++) {
2398 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2399
2400 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2401 return true;
2402 }
2403 return false;
2404}
2405
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002406static struct fib_info *
2407mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2408{
2409 return nh_grp->priv;
2410}
2411
2412struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002413 enum mlxsw_sp_l3proto proto;
2414 union {
2415 struct fib_info *fi;
2416 struct mlxsw_sp_fib6_entry *fib6_entry;
2417 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002418};
2419
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002420static bool
2421mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2422 const struct in6_addr *gw, int ifindex)
2423{
2424 int i;
2425
2426 for (i = 0; i < nh_grp->count; i++) {
2427 const struct mlxsw_sp_nexthop *nh;
2428
2429 nh = &nh_grp->nexthops[i];
2430 if (nh->ifindex == ifindex &&
2431 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2432 return true;
2433 }
2434
2435 return false;
2436}
2437
2438static bool
2439mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2440 const struct mlxsw_sp_fib6_entry *fib6_entry)
2441{
2442 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2443
2444 if (nh_grp->count != fib6_entry->nrt6)
2445 return false;
2446
2447 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2448 struct in6_addr *gw;
2449 int ifindex;
2450
2451 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2452 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2453 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2454 return false;
2455 }
2456
2457 return true;
2458}
2459
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002460static int
2461mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2462{
2463 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2464 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2465
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002466 switch (cmp_arg->proto) {
2467 case MLXSW_SP_L3_PROTO_IPV4:
2468 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2469 case MLXSW_SP_L3_PROTO_IPV6:
2470 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2471 cmp_arg->fib6_entry);
2472 default:
2473 WARN_ON(1);
2474 return 1;
2475 }
2476}
2477
2478static int
2479mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2480{
2481 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002482}
2483
2484static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2485{
2486 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002487 const struct mlxsw_sp_nexthop *nh;
2488 struct fib_info *fi;
2489 unsigned int val;
2490 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002491
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002492 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2493 case AF_INET:
2494 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2495 return jhash(&fi, sizeof(fi), seed);
2496 case AF_INET6:
2497 val = nh_grp->count;
2498 for (i = 0; i < nh_grp->count; i++) {
2499 nh = &nh_grp->nexthops[i];
2500 val ^= nh->ifindex;
2501 }
2502 return jhash(&val, sizeof(val), seed);
2503 default:
2504 WARN_ON(1);
2505 return 0;
2506 }
2507}
2508
2509static u32
2510mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2511{
2512 unsigned int val = fib6_entry->nrt6;
2513 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2514 struct net_device *dev;
2515
2516 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2517 dev = mlxsw_sp_rt6->rt->dst.dev;
2518 val ^= dev->ifindex;
2519 }
2520
2521 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002522}
2523
2524static u32
2525mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2526{
2527 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2528
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002529 switch (cmp_arg->proto) {
2530 case MLXSW_SP_L3_PROTO_IPV4:
2531 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2532 case MLXSW_SP_L3_PROTO_IPV6:
2533 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2534 default:
2535 WARN_ON(1);
2536 return 0;
2537 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002538}
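
/* To summarize the keying scheme used below: IPv4 nexthop groups are
 * keyed by their fib_info pointer, whereas IPv6 groups have no such
 * object and are instead matched by comparing their set of
 * (ifindex, gateway) pairs against the candidate fib6 entry.
 */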
2539
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002540static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002541 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002542 .hashfn = mlxsw_sp_nexthop_group_hash,
2543 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2544 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002545};
2546
2547static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2548 struct mlxsw_sp_nexthop_group *nh_grp)
2549{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002550 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2551 !nh_grp->gateway)
2552 return 0;
2553
Ido Schimmel9011b672017-05-16 19:38:25 +02002554 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002555 &nh_grp->ht_node,
2556 mlxsw_sp_nexthop_group_ht_params);
2557}
2558
2559static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2560 struct mlxsw_sp_nexthop_group *nh_grp)
2561{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002562 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2563 !nh_grp->gateway)
2564 return;
2565
Ido Schimmel9011b672017-05-16 19:38:25 +02002566 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002567 &nh_grp->ht_node,
2568 mlxsw_sp_nexthop_group_ht_params);
2569}
2570
2571static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002572mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2573 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002574{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002575 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2576
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002577 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002578 cmp_arg.fi = fi;
2579 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2580 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002581 mlxsw_sp_nexthop_group_ht_params);
2582}
2583
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002584static struct mlxsw_sp_nexthop_group *
2585mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2586 struct mlxsw_sp_fib6_entry *fib6_entry)
2587{
2588 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2589
2590 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2591 cmp_arg.fib6_entry = fib6_entry;
2592 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2593 &cmp_arg,
2594 mlxsw_sp_nexthop_group_ht_params);
2595}
2596
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002597static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2598 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2599 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2600 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2601};
2602
2603static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2604 struct mlxsw_sp_nexthop *nh)
2605{
Ido Schimmel9011b672017-05-16 19:38:25 +02002606 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002607 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2608}
2609
2610static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2611 struct mlxsw_sp_nexthop *nh)
2612{
Ido Schimmel9011b672017-05-16 19:38:25 +02002613 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002614 mlxsw_sp_nexthop_ht_params);
2615}
2616
Ido Schimmelad178c82017-02-08 11:16:40 +01002617static struct mlxsw_sp_nexthop *
2618mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2619 struct mlxsw_sp_nexthop_key key)
2620{
Ido Schimmel9011b672017-05-16 19:38:25 +02002621 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002622 mlxsw_sp_nexthop_ht_params);
2623}
2624
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002625static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002626 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002627 u32 adj_index, u16 ecmp_size,
2628 u32 new_adj_index,
2629 u16 new_ecmp_size)
2630{
2631 char raleu_pl[MLXSW_REG_RALEU_LEN];
2632
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002633 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002634 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2635 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002636 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002637 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2638}
2639
2640static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2641 struct mlxsw_sp_nexthop_group *nh_grp,
2642 u32 old_adj_index, u16 old_ecmp_size)
2643{
2644 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002645 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002646 int err;
2647
2648 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002649 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002650 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002651 fib = fib_entry->fib_node->fib;
2652 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002653 old_adj_index,
2654 old_ecmp_size,
2655 nh_grp->adj_index,
2656 nh_grp->ecmp_size);
2657 if (err)
2658 return err;
2659 }
2660 return 0;
2661}
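
/* The RALEU-based update above switches every route that uses the group
 * to the new adjacency block with one register write per FIB (virtual
 * router) referencing the group, which is presumably cheaper than
 * rewriting each RALUE entry individually when the group is moved
 * within the KVD linear area.
 */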
2662
Ido Schimmeleb789982017-10-22 23:11:48 +02002663static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2664 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002665{
2666 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2667 char ratr_pl[MLXSW_REG_RATR_LEN];
2668
2669 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002670 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2671 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002672 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002673 if (nh->counter_valid)
2674 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2675 else
2676 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2677
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002678 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2679}
2680
Ido Schimmeleb789982017-10-22 23:11:48 +02002681int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2682 struct mlxsw_sp_nexthop *nh)
2683{
2684 int i;
2685
2686 for (i = 0; i < nh->num_adj_entries; i++) {
2687 int err;
2688
2689 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2690 if (err)
2691 return err;
2692 }
2693
2694 return 0;
2695}
2696
2697static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2698 u32 adj_index,
2699 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002700{
2701 const struct mlxsw_sp_ipip_ops *ipip_ops;
2702
2703 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2704 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2705}
2706
Ido Schimmeleb789982017-10-22 23:11:48 +02002707static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2708 u32 adj_index,
2709 struct mlxsw_sp_nexthop *nh)
2710{
2711 int i;
2712
2713 for (i = 0; i < nh->num_adj_entries; i++) {
2714 int err;
2715
2716 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2717 nh);
2718 if (err)
2719 return err;
2720 }
2721
2722 return 0;
2723}
2724
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002725static int
Petr Machata35225e42017-09-02 23:49:22 +02002726mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2727 struct mlxsw_sp_nexthop_group *nh_grp,
2728 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002729{
2730 u32 adj_index = nh_grp->adj_index; /* base */
2731 struct mlxsw_sp_nexthop *nh;
2732 int i;
2733 int err;
2734
2735 for (i = 0; i < nh_grp->count; i++) {
2736 nh = &nh_grp->nexthops[i];
2737
2738 if (!nh->should_offload) {
2739 nh->offloaded = 0;
2740 continue;
2741 }
2742
Ido Schimmela59b7e02017-01-23 11:11:42 +01002743 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002744 switch (nh->type) {
2745 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002746 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002747 (mlxsw_sp, adj_index, nh);
2748 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002749 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2750 err = mlxsw_sp_nexthop_ipip_update
2751 (mlxsw_sp, adj_index, nh);
2752 break;
Petr Machata35225e42017-09-02 23:49:22 +02002753 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002754 if (err)
2755 return err;
2756 nh->update = 0;
2757 nh->offloaded = 1;
2758 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002759 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002760 }
2761 return 0;
2762}
2763
Ido Schimmel1819ae32017-07-21 18:04:28 +02002764static bool
2765mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2766 const struct mlxsw_sp_fib_entry *fib_entry);
2767
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002768static int
2769mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2770 struct mlxsw_sp_nexthop_group *nh_grp)
2771{
2772 struct mlxsw_sp_fib_entry *fib_entry;
2773 int err;
2774
2775 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002776 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2777 fib_entry))
2778 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002779 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2780 if (err)
2781 return err;
2782 }
2783 return 0;
2784}
2785
2786static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002787mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2788 enum mlxsw_reg_ralue_op op, int err);
2789
2790static void
2791mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2792{
2793 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2794 struct mlxsw_sp_fib_entry *fib_entry;
2795
2796 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2797 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2798 fib_entry))
2799 continue;
2800 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2801 }
2802}
2803
Ido Schimmel425a08c2017-10-22 23:11:47 +02002804static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2805{
2806 /* Valid sizes for an adjacency group are:
2807 * 1-64, 512, 1024, 2048 and 4096.
2808 */
2809 if (*p_adj_grp_size <= 64)
2810 return;
2811 else if (*p_adj_grp_size <= 512)
2812 *p_adj_grp_size = 512;
2813 else if (*p_adj_grp_size <= 1024)
2814 *p_adj_grp_size = 1024;
2815 else if (*p_adj_grp_size <= 2048)
2816 *p_adj_grp_size = 2048;
2817 else
2818 *p_adj_grp_size = 4096;
2819}
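/* For example, a requested size of 6 is left at 6, 90 is rounded up to
 * 512, 600 to 1024 and 3000 to 4096.
 */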
2820
2821static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2822 unsigned int alloc_size)
2823{
2824 if (alloc_size >= 4096)
2825 *p_adj_grp_size = 4096;
2826 else if (alloc_size >= 2048)
2827 *p_adj_grp_size = 2048;
2828 else if (alloc_size >= 1024)
2829 *p_adj_grp_size = 1024;
2830 else if (alloc_size >= 512)
2831 *p_adj_grp_size = 512;
2832}
2833
2834static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2835 u16 *p_adj_grp_size)
2836{
2837 unsigned int alloc_size;
2838 int err;
2839
2840 /* Round up the requested group size to the next size supported
2841 * by the device and make sure the request can be satisfied.
2842 */
2843 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2844 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2845 &alloc_size);
2846 if (err)
2847 return err;
2848 /* It is possible the allocation results in more allocated
 2849	 * entries than requested. Try to use as many of them as
2850 * possible.
2851 */
2852 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2853
2854 return 0;
2855}
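/* For instance, a group needing 90 adjacency entries is first rounded
 * up to 512. If the KVD linear allocator then reports that such an
 * allocation would actually provide, say, 1024 usable entries, the
 * group size is adjusted to 1024, the largest valid size that still
 * fits within the allocation (the allocator figures here are
 * hypothetical).
 */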
2856
Ido Schimmel77d964e2017-08-02 09:56:05 +02002857static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002858mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2859{
2860 int i, g = 0, sum_norm_weight = 0;
2861 struct mlxsw_sp_nexthop *nh;
2862
2863 for (i = 0; i < nh_grp->count; i++) {
2864 nh = &nh_grp->nexthops[i];
2865
2866 if (!nh->should_offload)
2867 continue;
2868 if (g > 0)
2869 g = gcd(nh->nh_weight, g);
2870 else
2871 g = nh->nh_weight;
2872 }
2873
2874 for (i = 0; i < nh_grp->count; i++) {
2875 nh = &nh_grp->nexthops[i];
2876
2877 if (!nh->should_offload)
2878 continue;
2879 nh->norm_nh_weight = nh->nh_weight / g;
2880 sum_norm_weight += nh->norm_nh_weight;
2881 }
2882
2883 nh_grp->sum_norm_weight = sum_norm_weight;
2884}
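/* Example (the weights are hypothetical): nexthop weights of 2 and 4
 * have a gcd of 2, so the normalized weights become 1 and 2 and
 * sum_norm_weight is 3. Nexthops that cannot be offloaded do not
 * participate in the calculation.
 */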
2885
2886static void
2887mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2888{
2889 int total = nh_grp->sum_norm_weight;
2890 u16 ecmp_size = nh_grp->ecmp_size;
2891 int i, weight = 0, lower_bound = 0;
2892
2893 for (i = 0; i < nh_grp->count; i++) {
2894 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2895 int upper_bound;
2896
2897 if (!nh->should_offload)
2898 continue;
2899 weight += nh->norm_nh_weight;
2900 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2901 nh->num_adj_entries = upper_bound - lower_bound;
2902 lower_bound = upper_bound;
2903 }
2904}
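/* Continuing the example above: with a sum_norm_weight of 3 and an
 * ecmp_size of 3 (assuming mlxsw_sp_fix_adj_grp_size() leaves the size
 * untouched), the first nexthop gets DIV_ROUND_CLOSEST(3 * 1, 3) = 1
 * adjacency entry and the second DIV_ROUND_CLOSEST(3 * 3, 3) - 1 = 2,
 * so traffic is spread 1:2 as requested.
 */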
2905
2906static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002907mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2908 struct mlxsw_sp_nexthop_group *nh_grp)
2909{
Ido Schimmeleb789982017-10-22 23:11:48 +02002910 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002911 struct mlxsw_sp_nexthop *nh;
2912 bool offload_change = false;
2913 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002914 bool old_adj_index_valid;
2915 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002916 int i;
2917 int err;
2918
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002919 if (!nh_grp->gateway) {
2920 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2921 return;
2922 }
2923
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002924 for (i = 0; i < nh_grp->count; i++) {
2925 nh = &nh_grp->nexthops[i];
2926
Petr Machata56b8a9e2017-07-31 09:27:29 +02002927 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002928 offload_change = true;
2929 if (nh->should_offload)
2930 nh->update = 1;
2931 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002932 }
2933 if (!offload_change) {
2934 /* Nothing was added or removed, so no need to reallocate. Just
2935 * update MAC on existing adjacency indexes.
2936 */
Petr Machata35225e42017-09-02 23:49:22 +02002937 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002938 if (err) {
2939 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2940 goto set_trap;
2941 }
2942 return;
2943 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002944 mlxsw_sp_nexthop_group_normalize(nh_grp);
2945 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002946 /* No neigh of this group is connected so we just set
 2947		 * the trap and let everything flow through the kernel.
2948 */
2949 goto set_trap;
2950
Ido Schimmeleb789982017-10-22 23:11:48 +02002951 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002952 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2953 if (err)
2954 /* No valid allocation size available. */
2955 goto set_trap;
2956
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002957 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2958 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002959 /* We ran out of KVD linear space, just set the
 2960		 * trap and let everything flow through the kernel.
2961 */
2962 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2963 goto set_trap;
2964 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002965 old_adj_index_valid = nh_grp->adj_index_valid;
2966 old_adj_index = nh_grp->adj_index;
2967 old_ecmp_size = nh_grp->ecmp_size;
2968 nh_grp->adj_index_valid = 1;
2969 nh_grp->adj_index = adj_index;
2970 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002971 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002972 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002973 if (err) {
2974 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2975 goto set_trap;
2976 }
2977
2978 if (!old_adj_index_valid) {
2979 /* The trap was set for fib entries, so we have to call
2980 * fib entry update to unset it and use adjacency index.
2981 */
2982 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2983 if (err) {
2984 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2985 goto set_trap;
2986 }
2987 return;
2988 }
2989
2990 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2991 old_adj_index, old_ecmp_size);
2992 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2993 if (err) {
2994 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2995 goto set_trap;
2996 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002997
2998 /* Offload state within the group changed, so update the flags. */
2999 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3000
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003001 return;
3002
3003set_trap:
3004 old_adj_index_valid = nh_grp->adj_index_valid;
3005 nh_grp->adj_index_valid = 0;
3006 for (i = 0; i < nh_grp->count; i++) {
3007 nh = &nh_grp->nexthops[i];
3008 nh->offloaded = 0;
3009 }
3010 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3011 if (err)
3012 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3013 if (old_adj_index_valid)
3014 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3015}
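/* To summarize the refresh above: when the offload state of at least
 * one nexthop changed, the group weights are re-normalized, a new KVD
 * linear area of the fixed-up ECMP size is allocated and populated,
 * the routes are switched over to it (either by mass-updating entries
 * that pointed at the old adjacency range or, if the group previously
 * had no valid adjacency index, by updating each FIB entry) and the
 * old area is freed. If nothing changed, only the MACs of the existing
 * adjacency entries are rewritten. Any failure falls back to the
 * set_trap path, so traffic keeps flowing through the kernel.
 */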
3016
3017static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3018 bool removing)
3019{
Petr Machata213666a2017-07-31 09:27:30 +02003020 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003021 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02003022 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003023 nh->should_offload = 0;
3024 nh->update = 1;
3025}
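/* Called with removing=false the nexthop is marked as offloadable;
 * with removing=true it is unmarked (if it was offloaded). Either way
 * update is set so that the next group refresh rewrites the relevant
 * adjacency entries.
 */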
3026
3027static void
3028mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3029 struct mlxsw_sp_neigh_entry *neigh_entry,
3030 bool removing)
3031{
3032 struct mlxsw_sp_nexthop *nh;
3033
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003034 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3035 neigh_list_node) {
3036 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3037 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3038 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003039}
3040
Ido Schimmel9665b742017-02-08 11:16:42 +01003041static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003042 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003043{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003044 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003045 return;
3046
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003047 nh->rif = rif;
3048 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003049}
3050
3051static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3052{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003053 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003054 return;
3055
3056 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003057 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003058}
3059
Ido Schimmela8c97012017-02-08 11:16:35 +01003060static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3061 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003062{
3063 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003064 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003065 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003066 int err;
3067
Ido Schimmelad178c82017-02-08 11:16:40 +01003068 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003069 return 0;
3070
Jiri Pirko33b13412016-11-10 12:31:04 +01003071	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003072	 * not destroyed before the nexthop entry is done with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003073 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003074 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003075 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003076 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003077 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003078 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3079 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003080 if (IS_ERR(n))
3081 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003082 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003083 }
3084 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3085 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003086 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3087 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003088 err = -EINVAL;
3089 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003090 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003091 }
Yotam Gigib2157142016-07-05 11:27:51 +02003092
3093 /* If that is the first nexthop connected to that neigh, add to
3094 * nexthop_neighs_list
3095 */
3096 if (list_empty(&neigh_entry->nexthop_list))
3097 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003098 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003099
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003100 nh->neigh_entry = neigh_entry;
3101 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3102 read_lock_bh(&n->lock);
3103 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003104 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003105 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003106 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003107
3108 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003109
3110err_neigh_entry_create:
3111 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003112 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003113}
3114
Ido Schimmela8c97012017-02-08 11:16:35 +01003115static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3116 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003117{
3118 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003119 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003120
Ido Schimmelb8399a12017-02-08 11:16:33 +01003121 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003122 return;
3123 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003124
Ido Schimmel58312122016-12-23 09:32:50 +01003125 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003126 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003127 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003128
3129 /* If that is the last nexthop connected to that neigh, remove from
3130 * nexthop_neighs_list
3131 */
Ido Schimmele58be792017-02-08 11:16:28 +01003132 if (list_empty(&neigh_entry->nexthop_list))
3133 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003134
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003135 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3136 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3137
3138 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003139}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003140
Petr Machata1012b9a2017-09-02 23:49:23 +02003141static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003142 struct mlxsw_sp_nexthop *nh,
3143 struct net_device *ol_dev)
3144{
3145 if (!nh->nh_grp->gateway || nh->ipip_entry)
3146 return 0;
3147
Petr Machata4cccb732017-10-16 16:26:39 +02003148 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3149 if (!nh->ipip_entry)
3150 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003151
3152 __mlxsw_sp_nexthop_neigh_update(nh, false);
3153 return 0;
3154}
3155
3156static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3157 struct mlxsw_sp_nexthop *nh)
3158{
3159 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3160
3161 if (!ipip_entry)
3162 return;
3163
3164 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003165 nh->ipip_entry = NULL;
3166}
3167
3168static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3169 const struct fib_nh *fib_nh,
3170 enum mlxsw_sp_ipip_type *p_ipipt)
3171{
3172 struct net_device *dev = fib_nh->nh_dev;
3173
3174 return dev &&
3175 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3176 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3177}
3178
Petr Machata35225e42017-09-02 23:49:22 +02003179static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3180 struct mlxsw_sp_nexthop *nh)
3181{
3182 switch (nh->type) {
3183 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3184 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3185 mlxsw_sp_nexthop_rif_fini(nh);
3186 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003187 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003188 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003189 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3190 break;
Petr Machata35225e42017-09-02 23:49:22 +02003191 }
3192}
3193
3194static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3195 struct mlxsw_sp_nexthop *nh,
3196 struct fib_nh *fib_nh)
3197{
Petr Machata1012b9a2017-09-02 23:49:23 +02003198 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003199 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003200 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003201 struct mlxsw_sp_rif *rif;
3202 int err;
3203
Petr Machata1012b9a2017-09-02 23:49:23 +02003204 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3205 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3206 MLXSW_SP_L3_PROTO_IPV4)) {
3207 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003208 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003209 if (err)
3210 return err;
3211 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3212 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003213 }
3214
Petr Machata35225e42017-09-02 23:49:22 +02003215 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3216 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3217 if (!rif)
3218 return 0;
3219
3220 mlxsw_sp_nexthop_rif_init(nh, rif);
3221 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3222 if (err)
3223 goto err_neigh_init;
3224
3225 return 0;
3226
3227err_neigh_init:
3228 mlxsw_sp_nexthop_rif_fini(nh);
3229 return err;
3230}
3231
3232static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3233 struct mlxsw_sp_nexthop *nh)
3234{
3235 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3236}
3237
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003238static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3239 struct mlxsw_sp_nexthop_group *nh_grp,
3240 struct mlxsw_sp_nexthop *nh,
3241 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003242{
3243 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003244 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003245 int err;
3246
3247 nh->nh_grp = nh_grp;
3248 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003249#ifdef CONFIG_IP_ROUTE_MULTIPATH
3250 nh->nh_weight = fib_nh->nh_weight;
3251#else
3252 nh->nh_weight = 1;
3253#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003254 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003255 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3256 if (err)
3257 return err;
3258
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003259 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003260 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3261
Ido Schimmel97989ee2017-03-10 08:53:38 +01003262 if (!dev)
3263 return 0;
3264
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003265 in_dev = __in_dev_get_rtnl(dev);
3266 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3267 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3268 return 0;
3269
Petr Machata35225e42017-09-02 23:49:22 +02003270 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003271 if (err)
3272 goto err_nexthop_neigh_init;
3273
3274 return 0;
3275
3276err_nexthop_neigh_init:
3277 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3278 return err;
3279}
3280
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003281static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3282 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003283{
Petr Machata35225e42017-09-02 23:49:22 +02003284 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003285 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003286 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003287 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003288}
3289
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003290static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3291 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003292{
3293 struct mlxsw_sp_nexthop_key key;
3294 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003295
Ido Schimmel9011b672017-05-16 19:38:25 +02003296 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003297 return;
3298
3299 key.fib_nh = fib_nh;
3300 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3301 if (WARN_ON_ONCE(!nh))
3302 return;
3303
Ido Schimmelad178c82017-02-08 11:16:40 +01003304 switch (event) {
3305 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003306 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003307 break;
3308 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003309 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003310 break;
3311 }
3312
3313 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3314}
3315
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003316static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3317 struct mlxsw_sp_rif *rif)
3318{
3319 struct mlxsw_sp_nexthop *nh;
3320
3321 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3322 __mlxsw_sp_nexthop_neigh_update(nh, false);
3323 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3324 }
3325}
3326
Ido Schimmel9665b742017-02-08 11:16:42 +01003327static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003328 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003329{
3330 struct mlxsw_sp_nexthop *nh, *tmp;
3331
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003332 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003333 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003334 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3335 }
3336}
3337
Petr Machata9b014512017-09-02 23:49:20 +02003338static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3339 const struct fib_info *fi)
3340{
Petr Machata1012b9a2017-09-02 23:49:23 +02003341 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3342 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003343}
3344
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003345static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003346mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003347{
3348 struct mlxsw_sp_nexthop_group *nh_grp;
3349 struct mlxsw_sp_nexthop *nh;
3350 struct fib_nh *fib_nh;
3351 size_t alloc_size;
3352 int i;
3353 int err;
3354
3355 alloc_size = sizeof(*nh_grp) +
3356 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3357 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3358 if (!nh_grp)
3359 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003360 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003361 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003362 nh_grp->neigh_tbl = &arp_tbl;
3363
Petr Machata9b014512017-09-02 23:49:20 +02003364 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003365 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003366 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003367 for (i = 0; i < nh_grp->count; i++) {
3368 nh = &nh_grp->nexthops[i];
3369 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003370 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003371 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003372 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003373 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003374 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3375 if (err)
3376 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003377 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3378 return nh_grp;
3379
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003380err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003381err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003382 for (i--; i >= 0; i--) {
3383 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003384 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003385 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003386 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003387 kfree(nh_grp);
3388 return ERR_PTR(err);
3389}
3390
3391static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003392mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3393 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003394{
3395 struct mlxsw_sp_nexthop *nh;
3396 int i;
3397
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003398 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003399 for (i = 0; i < nh_grp->count; i++) {
3400 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003401 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003402 }
Ido Schimmel58312122016-12-23 09:32:50 +01003403 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3404 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003405 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003406 kfree(nh_grp);
3407}
3408
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003409static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3410 struct mlxsw_sp_fib_entry *fib_entry,
3411 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003412{
3413 struct mlxsw_sp_nexthop_group *nh_grp;
3414
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003415 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003416 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003417 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003418 if (IS_ERR(nh_grp))
3419 return PTR_ERR(nh_grp);
3420 }
3421 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3422 fib_entry->nh_group = nh_grp;
3423 return 0;
3424}
3425
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003426static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3427 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003428{
3429 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3430
3431 list_del(&fib_entry->nexthop_group_node);
3432 if (!list_empty(&nh_grp->fib_list))
3433 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003434 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003435}
3436
Ido Schimmel013b20f2017-02-08 11:16:36 +01003437static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003438mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3439{
3440 struct mlxsw_sp_fib4_entry *fib4_entry;
3441
3442 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3443 common);
3444 return !fib4_entry->tos;
3445}
3446
3447static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003448mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3449{
3450 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3451
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003452 switch (fib_entry->fib_node->fib->proto) {
3453 case MLXSW_SP_L3_PROTO_IPV4:
3454 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3455 return false;
3456 break;
3457 case MLXSW_SP_L3_PROTO_IPV6:
3458 break;
3459 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003460
Ido Schimmel013b20f2017-02-08 11:16:36 +01003461 switch (fib_entry->type) {
3462 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3463 return !!nh_group->adj_index_valid;
3464 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003465 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003466 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3467 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003468 default:
3469 return false;
3470 }
3471}
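/* In other words: IPv4 entries are only offloaded for the default
 * (zero) TOS, remote (gateway) entries additionally require a valid
 * adjacency index, local entries require an egress RIF and IP-in-IP
 * decap entries can always be offloaded.
 */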
3472
Ido Schimmel428b8512017-08-03 13:28:28 +02003473static struct mlxsw_sp_nexthop *
3474mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3475 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3476{
3477 int i;
3478
3479 for (i = 0; i < nh_grp->count; i++) {
3480 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3481 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3482
3483 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3484 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3485 &rt->rt6i_gateway))
 3486			return nh;
3488 }
3489
3490 return NULL;
3491}
3492
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003493static void
3494mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3495{
3496 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3497 int i;
3498
Petr Machata4607f6d2017-09-02 23:49:25 +02003499 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3500 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003501 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3502 return;
3503 }
3504
3505 for (i = 0; i < nh_grp->count; i++) {
3506 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3507
3508 if (nh->offloaded)
3509 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3510 else
3511 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3512 }
3513}
3514
3515static void
3516mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3517{
3518 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3519 int i;
3520
3521 for (i = 0; i < nh_grp->count; i++) {
3522 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3523
3524 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3525 }
3526}
3527
Ido Schimmel428b8512017-08-03 13:28:28 +02003528static void
3529mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3530{
3531 struct mlxsw_sp_fib6_entry *fib6_entry;
3532 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3533
3534 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3535 common);
3536
3537 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3538 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003539 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003540 return;
3541 }
3542
3543 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3544 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3545 struct mlxsw_sp_nexthop *nh;
3546
3547 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3548 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003549 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003550 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003551 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003552 }
3553}
3554
3555static void
3556mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3557{
3558 struct mlxsw_sp_fib6_entry *fib6_entry;
3559 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3560
3561 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3562 common);
3563 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3564 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3565
Ido Schimmelfe400792017-08-15 09:09:49 +02003566 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003567 }
3568}
3569
Ido Schimmel013b20f2017-02-08 11:16:36 +01003570static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3571{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003572 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003573 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003574 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003575 break;
3576 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003577 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3578 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003579 }
3580}
3581
3582static void
3583mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3584{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003585 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003586 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003587 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003588 break;
3589 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003590 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3591 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003592 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003593}
3594
3595static void
3596mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3597 enum mlxsw_reg_ralue_op op, int err)
3598{
3599 switch (op) {
3600 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003601 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3602 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3603 if (err)
3604 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003605 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003606 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003607 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003608 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3609 return;
3610 default:
3611 return;
3612 }
3613}
3614
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003615static void
3616mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3617 const struct mlxsw_sp_fib_entry *fib_entry,
3618 enum mlxsw_reg_ralue_op op)
3619{
3620 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3621 enum mlxsw_reg_ralxx_protocol proto;
3622 u32 *p_dip;
3623
3624 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3625
3626 switch (fib->proto) {
3627 case MLXSW_SP_L3_PROTO_IPV4:
3628 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3629 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3630 fib_entry->fib_node->key.prefix_len,
3631 *p_dip);
3632 break;
3633 case MLXSW_SP_L3_PROTO_IPV6:
3634 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3635 fib_entry->fib_node->key.prefix_len,
3636 fib_entry->fib_node->key.addr);
3637 break;
3638 }
3639}
3640
3641static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3642 struct mlxsw_sp_fib_entry *fib_entry,
3643 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003644{
3645 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003646 enum mlxsw_reg_ralue_trap_action trap_action;
3647 u16 trap_id = 0;
3648 u32 adjacency_index = 0;
3649 u16 ecmp_size = 0;
3650
3651 /* In case the nexthop group adjacency index is valid, use it
 3652	 * with the provided ECMP size. Otherwise, set up a trap and pass
 3653	 * traffic to the kernel.
3654 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003655 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003656 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3657 adjacency_index = fib_entry->nh_group->adj_index;
3658 ecmp_size = fib_entry->nh_group->ecmp_size;
3659 } else {
3660 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3661 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3662 }
3663
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003664 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003665 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3666 adjacency_index, ecmp_size);
3667 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3668}
3669
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003670static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3671 struct mlxsw_sp_fib_entry *fib_entry,
3672 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003673{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003674 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003675 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003676 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003677 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003678 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003679
3680 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3681 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003682 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003683 } else {
3684 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3685 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3686 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003687
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003688 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003689 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3690 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003691 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3692}
3693
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003694static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3695 struct mlxsw_sp_fib_entry *fib_entry,
3696 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003697{
3698 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003699
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003700 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003701 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3702 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3703}
3704
Petr Machata4607f6d2017-09-02 23:49:25 +02003705static int
3706mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3707 struct mlxsw_sp_fib_entry *fib_entry,
3708 enum mlxsw_reg_ralue_op op)
3709{
3710 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3711 const struct mlxsw_sp_ipip_ops *ipip_ops;
3712
3713 if (WARN_ON(!ipip_entry))
3714 return -EINVAL;
3715
3716 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3717 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3718 fib_entry->decap.tunnel_index);
3719}
3720
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003721static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3722 struct mlxsw_sp_fib_entry *fib_entry,
3723 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003724{
3725 switch (fib_entry->type) {
3726 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003727 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003728 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003729 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003730 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003731 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003732 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3733 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3734 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003735 }
3736 return -EINVAL;
3737}
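/* Mapping of entry types to RALUE actions as implemented above: remote
 * entries program an adjacency (ECMP) action, local entries forward to
 * a router interface, trap entries punt to the CPU via ip2me and
 * IP-in-IP decap entries delegate the write to the tunnel's
 * fib_entry_op(). Remote and local entries degrade to a trap action
 * when the entry cannot currently be offloaded.
 */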
3738
3739static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3740 struct mlxsw_sp_fib_entry *fib_entry,
3741 enum mlxsw_reg_ralue_op op)
3742{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003743 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003744
Ido Schimmel013b20f2017-02-08 11:16:36 +01003745 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003746
Ido Schimmel013b20f2017-02-08 11:16:36 +01003747 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003748}
3749
3750static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3751 struct mlxsw_sp_fib_entry *fib_entry)
3752{
Jiri Pirko7146da32016-09-01 10:37:41 +02003753 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3754 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003755}
3756
3757static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3758 struct mlxsw_sp_fib_entry *fib_entry)
3759{
3760 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3761 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3762}
3763
Jiri Pirko61c503f2016-07-04 08:23:11 +02003764static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003765mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3766 const struct fib_entry_notifier_info *fen_info,
3767 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003768{
Petr Machata4607f6d2017-09-02 23:49:25 +02003769 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3770 struct net_device *dev = fen_info->fi->fib_dev;
3771 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003772 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003773
Ido Schimmel97989ee2017-03-10 08:53:38 +01003774 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003775 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003776 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3777 MLXSW_SP_L3_PROTO_IPV4, dip);
3778 if (ipip_entry) {
3779 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3780 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3781 fib_entry,
3782 ipip_entry);
3783 }
3784 /* fall through */
3785 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003786 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3787 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003788 case RTN_UNREACHABLE: /* fall through */
3789 case RTN_BLACKHOLE: /* fall through */
3790 case RTN_PROHIBIT:
3791 /* Packets hitting these routes need to be trapped, but
3792 * can do so with a lower priority than packets directed
3793 * at the host, so use action type local instead of trap.
3794 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003795 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003796 return 0;
3797 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003798 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003799 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003800 else
3801 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003802 return 0;
3803 default:
3804 return -EINVAL;
3805 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003806}
3807
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003808static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003809mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3810 struct mlxsw_sp_fib_node *fib_node,
3811 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003812{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003813 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003814 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003815 int err;
3816
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003817 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3818 if (!fib4_entry)
3819 return ERR_PTR(-ENOMEM);
3820 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003821
3822 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3823 if (err)
3824 goto err_fib4_entry_type_set;
3825
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003826 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003827 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003828 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003829
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003830 fib4_entry->prio = fen_info->fi->fib_priority;
3831 fib4_entry->tb_id = fen_info->tb_id;
3832 fib4_entry->type = fen_info->type;
3833 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003834
3835 fib_entry->fib_node = fib_node;
3836
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003837 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003838
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003839err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003840err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003841 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003842 return ERR_PTR(err);
3843}
3844
3845static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003846 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003847{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003848 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003849 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003850}
3851
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003852static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003853mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3854 const struct fib_entry_notifier_info *fen_info)
3855{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003856 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003857 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003858 struct mlxsw_sp_fib *fib;
3859 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003860
Ido Schimmel160e22a2017-07-18 10:10:20 +02003861 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3862 if (!vr)
3863 return NULL;
3864 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3865
3866 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3867 sizeof(fen_info->dst),
3868 fen_info->dst_len);
3869 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003870 return NULL;
3871
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003872 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3873 if (fib4_entry->tb_id == fen_info->tb_id &&
3874 fib4_entry->tos == fen_info->tos &&
3875 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003876 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3877 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003878 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003879 }
3880 }
3881
3882 return NULL;
3883}
3884
3885static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3886 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3887 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3888 .key_len = sizeof(struct mlxsw_sp_fib_key),
3889 .automatic_shrinking = true,
3890};
3891
3892static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3893 struct mlxsw_sp_fib_node *fib_node)
3894{
3895 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3896 mlxsw_sp_fib_ht_params);
3897}
3898
3899static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3900 struct mlxsw_sp_fib_node *fib_node)
3901{
3902 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3903 mlxsw_sp_fib_ht_params);
3904}
3905
3906static struct mlxsw_sp_fib_node *
3907mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3908 size_t addr_len, unsigned char prefix_len)
3909{
3910 struct mlxsw_sp_fib_key key;
3911
3912 memset(&key, 0, sizeof(key));
3913 memcpy(key.addr, addr, addr_len);
3914 key.prefix_len = prefix_len;
3915 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3916}
3917
3918static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003919mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003920 size_t addr_len, unsigned char prefix_len)
3921{
3922 struct mlxsw_sp_fib_node *fib_node;
3923
3924 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3925 if (!fib_node)
3926 return NULL;
3927
3928 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003929 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003930 memcpy(fib_node->key.addr, addr, addr_len);
3931 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003932
3933 return fib_node;
3934}
3935
3936static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3937{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003938 list_del(&fib_node->list);
3939 WARN_ON(!list_empty(&fib_node->entry_list));
3940 kfree(fib_node);
3941}
3942
3943static bool
3944mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3945 const struct mlxsw_sp_fib_entry *fib_entry)
3946{
3947 return list_first_entry(&fib_node->entry_list,
3948 struct mlxsw_sp_fib_entry, list) == fib_entry;
3949}
3950
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003951static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3952 struct mlxsw_sp_fib *fib,
3953 struct mlxsw_sp_fib_node *fib_node)
3954{
3955 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3956 struct mlxsw_sp_lpm_tree *lpm_tree;
3957 int err;
3958
3959 /* Since the tree is shared between all virtual routers we must
3960 * make sure it contains all the required prefix lengths. This
3961 * can be computed by either adding the new prefix length to the
3962 * existing prefix usage of a bound tree, or by aggregating the
3963 * prefix lengths across all virtual routers and adding the new
3964 * one as well.
3965 */
3966 if (fib->lpm_tree)
3967 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3968 &fib->lpm_tree->prefix_usage);
3969 else
3970 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3971 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3972
3973 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3974 fib->proto);
3975 if (IS_ERR(lpm_tree))
3976 return PTR_ERR(lpm_tree);
3977
3978 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3979 return 0;
3980
3981 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3982 if (err)
3983 return err;
3984
3985 return 0;
3986}
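/* For example (the prefix lengths are hypothetical): if the bound tree
 * currently covers prefix lengths {0, 24, 32} and a /16 node is being
 * linked, a tree covering {0, 16, 24, 32} is looked up or created and,
 * unless it is the tree already in use, the virtual routers using it
 * are rebound to the new tree.
 */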
3987
3988static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3989 struct mlxsw_sp_fib *fib)
3990{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003991 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3992 return;
3993 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3994 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3995 fib->lpm_tree = NULL;
3996}
3997
Ido Schimmel9aecce12017-02-09 10:28:42 +01003998static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3999{
4000 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004001 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004002
4003 if (fib->prefix_ref_count[prefix_len]++ == 0)
4004 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4005}
4006
4007static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4008{
4009 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004010 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004011
4012 if (--fib->prefix_ref_count[prefix_len] == 0)
4013 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4014}
4015
Ido Schimmel76610eb2017-03-10 08:53:41 +01004016static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4017 struct mlxsw_sp_fib_node *fib_node,
4018 struct mlxsw_sp_fib *fib)
4019{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004020 int err;
4021
4022 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4023 if (err)
4024 return err;
4025 fib_node->fib = fib;
4026
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004027 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
4028 if (err)
4029 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004030
4031 mlxsw_sp_fib_node_prefix_inc(fib_node);
4032
4033 return 0;
4034
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004035err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004036 fib_node->fib = NULL;
4037 mlxsw_sp_fib_node_remove(fib, fib_node);
4038 return err;
4039}
4040
4041static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4042 struct mlxsw_sp_fib_node *fib_node)
4043{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004044 struct mlxsw_sp_fib *fib = fib_node->fib;
4045
4046 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004047 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004048 fib_node->fib = NULL;
4049 mlxsw_sp_fib_node_remove(fib, fib_node);
4050}
4051
Ido Schimmel9aecce12017-02-09 10:28:42 +01004052static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004053mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4054 size_t addr_len, unsigned char prefix_len,
4055 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004056{
4057 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004058 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004059 struct mlxsw_sp_vr *vr;
4060 int err;
4061
David Ahernf8fa9b42017-10-18 09:56:56 -07004062 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004063 if (IS_ERR(vr))
4064 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004065 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004066
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004067 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004068 if (fib_node)
4069 return fib_node;
4070
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004071 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004072 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004073 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004074 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004075 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004076
Ido Schimmel76610eb2017-03-10 08:53:41 +01004077 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4078 if (err)
4079 goto err_fib_node_init;
4080
Ido Schimmel9aecce12017-02-09 10:28:42 +01004081 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004082
Ido Schimmel76610eb2017-03-10 08:53:41 +01004083err_fib_node_init:
4084 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004085err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004086 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004087 return ERR_PTR(err);
4088}
4089
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004090static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4091 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004092{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004093 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004094
Ido Schimmel9aecce12017-02-09 10:28:42 +01004095 if (!list_empty(&fib_node->entry_list))
4096 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004097 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004098 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004099 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004100}
4101
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004102static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004103mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004104 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004105{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004106 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004107
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004108 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4109 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004110 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004111 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004112 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004113 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004114 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004115 if (fib4_entry->prio >= new4_entry->prio ||
4116 fib4_entry->tos < new4_entry->tos)
4117 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118 }
4119
4120 return NULL;
4121}
4122
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004123static int
4124mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4125 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004126{
4127 struct mlxsw_sp_fib_node *fib_node;
4128
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004129 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004130 return -EINVAL;
4131
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004132 fib_node = fib4_entry->common.fib_node;
4133 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4134 common.list) {
4135 if (fib4_entry->tb_id != new4_entry->tb_id ||
4136 fib4_entry->tos != new4_entry->tos ||
4137 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004138 break;
4139 }
4140
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004141 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004142 return 0;
4143}
4144
Ido Schimmel9aecce12017-02-09 10:28:42 +01004145static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004146mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004147 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004148{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004149 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004150 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004151
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004152 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004153
Ido Schimmel4283bce2017-02-09 10:28:43 +01004154 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004155 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4156 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004157 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004158
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004159 /* Insert the new entry before the replaced one, so that the
4160 * replaced one can be removed later.
4161 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004162 if (fib4_entry) {
4163 list_add_tail(&new4_entry->common.list,
4164 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004165 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004166 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004167
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004168 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4169 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004170 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004171 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004172 }
4173
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004174 if (fib4_entry)
4175 list_add(&new4_entry->common.list,
4176 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004177 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004178 list_add(&new4_entry->common.list,
4179 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004180 }
4181
4182 return 0;
4183}
4184
4185static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004186mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004187{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004188 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004189}
4190
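/* Only the first entry in a FIB node's list is reflected in the device's
 * routing table. When a new entry becomes the first one, the previously
 * offloaded entry is overwritten in place rather than deleted first, so
 * that packets matching the prefix are not dropped during the update.
 */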
Ido Schimmel80c238f2017-07-18 10:10:29 +02004191static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4192 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004193{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004194 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4195
Ido Schimmel9aecce12017-02-09 10:28:42 +01004196 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4197 return 0;
4198
4199 /* To prevent packet loss, overwrite the previously offloaded
4200 * entry.
4201 */
4202 if (!list_is_singular(&fib_node->entry_list)) {
4203 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4204 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4205
4206 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4207 }
4208
4209 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4210}
4211
Ido Schimmel80c238f2017-07-18 10:10:29 +02004212static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4213 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004214{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004215 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4216
Ido Schimmel9aecce12017-02-09 10:28:42 +01004217 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4218 return;
4219
4220 /* Promote the next entry by overwriting the deleted entry */
4221 if (!list_is_singular(&fib_node->entry_list)) {
4222 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4223 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4224
4225 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4226 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4227 return;
4228 }
4229
4230 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4231}
4232
4233static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004234 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004235 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004236{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004237 int err;
4238
Ido Schimmel9efbee62017-07-18 10:10:28 +02004239 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004240 if (err)
4241 return err;
4242
Ido Schimmel80c238f2017-07-18 10:10:29 +02004243 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004244 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004245 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004246
Ido Schimmel9aecce12017-02-09 10:28:42 +01004247 return 0;
4248
Ido Schimmel80c238f2017-07-18 10:10:29 +02004249err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004250 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004251 return err;
4252}
4253
4254static void
4255mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004256 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004257{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004258 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004259 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004260
4261 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4262 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004263}
4264
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004265static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004266 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004267 bool replace)
4268{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004269 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4270 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004271
4272 if (!replace)
4273 return;
4274
4275 /* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004276 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004277
4278 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4279 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004280 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004281}
4282
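/* Process an IPv4 route add / append / replace notification: get (or
 * create) the FIB node for the prefix, create the new entry, link it into
 * the node's sorted list and, for a replace operation, remove the entry
 * that was replaced.
 */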
Ido Schimmel9aecce12017-02-09 10:28:42 +01004283static int
4284mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004285 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004286 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004287{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004288 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004289 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004290 int err;
4291
Ido Schimmel9011b672017-05-16 19:38:25 +02004292 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004293 return 0;
4294
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004295 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4296 &fen_info->dst, sizeof(fen_info->dst),
4297 fen_info->dst_len,
4298 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004299 if (IS_ERR(fib_node)) {
4300 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4301 return PTR_ERR(fib_node);
4302 }
4303
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004304 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4305 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004306 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004307 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004308 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004309 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004310
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004311 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004312 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004313 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004314 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4315 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004316 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004317
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004318 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004319
Jiri Pirko61c503f2016-07-04 08:23:11 +02004320 return 0;
4321
Ido Schimmel9aecce12017-02-09 10:28:42 +01004322err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004323 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004324err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004325 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004326 return err;
4327}
4328
Jiri Pirko37956d72016-10-20 16:05:43 +02004329static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4330 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004331{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004332 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004333 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004334
Ido Schimmel9011b672017-05-16 19:38:25 +02004335 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004336 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004337
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004338 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4339 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004340 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004341 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004342
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004343 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4344 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004345 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004346}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004347
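/* IPv6 routes are handled similarly to IPv4 ones, with the difference
 * that a single FIB entry may aggregate several rt6_info routes (for
 * example an ECMP route), which are tracked on the entry's rt6_list.
 */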
Ido Schimmel428b8512017-08-03 13:28:28 +02004348static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4349{
4350 /* Packets with a link-local destination IP arriving at the router
4351 * are trapped to the CPU, so there is no need to program specific
4352 * routes for them.
4353 */
4354 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4355 return true;
4356
4357 /* Multicast routes aren't supported, so ignore them. Neighbour
4358 * Discovery packets are specifically trapped.
4359 */
4360 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4361 return true;
4362
4363 /* Cloned routes are irrelevant in the forwarding path. */
4364 if (rt->rt6i_flags & RTF_CACHE)
4365 return true;
4366
4367 return false;
4368}
4369
4370static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4371{
4372 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4373
4374 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4375 if (!mlxsw_sp_rt6)
4376 return ERR_PTR(-ENOMEM);
4377
4378 /* In case of route replace, the replaced route is deleted without
4379 * notification. Take a reference to prevent accessing freed
4380 * memory.
4381 */
4382 mlxsw_sp_rt6->rt = rt;
4383 rt6_hold(rt);
4384
4385 return mlxsw_sp_rt6;
4386}
4387
4388#if IS_ENABLED(CONFIG_IPV6)
4389static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4390{
4391 rt6_release(rt);
4392}
4393#else
4394static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4395{
4396}
4397#endif
4398
4399static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4400{
4401 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4402 kfree(mlxsw_sp_rt6);
4403}
4404
4405static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4406{
4407 /* RTF_CACHE routes are ignored */
4408 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4409}
4410
4411static struct rt6_info *
4412mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4413{
4414 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4415 list)->rt;
4416}
4417
4418static struct mlxsw_sp_fib6_entry *
4419mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004420 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004421{
4422 struct mlxsw_sp_fib6_entry *fib6_entry;
4423
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004424 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004425 return NULL;
4426
4427 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4428 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4429
4430 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4431 * virtual router.
4432 */
4433 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4434 continue;
4435 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4436 break;
4437 if (rt->rt6i_metric < nrt->rt6i_metric)
4438 continue;
4439 if (rt->rt6i_metric == nrt->rt6i_metric &&
4440 mlxsw_sp_fib6_rt_can_mp(rt))
4441 return fib6_entry;
4442 if (rt->rt6i_metric > nrt->rt6i_metric)
4443 break;
4444 }
4445
4446 return NULL;
4447}
4448
4449static struct mlxsw_sp_rt6 *
4450mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4451 const struct rt6_info *rt)
4452{
4453 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4454
4455 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4456 if (mlxsw_sp_rt6->rt == rt)
4457 return mlxsw_sp_rt6;
4458 }
4459
4460 return NULL;
4461}
4462
Petr Machata8f28a302017-09-02 23:49:24 +02004463static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4464 const struct rt6_info *rt,
4465 enum mlxsw_sp_ipip_type *ret)
4466{
4467 return rt->dst.dev &&
4468 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4469}
4470
Petr Machata35225e42017-09-02 23:49:22 +02004471static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4472 struct mlxsw_sp_nexthop_group *nh_grp,
4473 struct mlxsw_sp_nexthop *nh,
4474 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004475{
Petr Machata8f28a302017-09-02 23:49:24 +02004476 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004477 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004478 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004479 struct mlxsw_sp_rif *rif;
4480 int err;
4481
Petr Machata8f28a302017-09-02 23:49:24 +02004482 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4483 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4484 MLXSW_SP_L3_PROTO_IPV6)) {
4485 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004486 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004487 if (err)
4488 return err;
4489 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4490 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004491 }
4492
Petr Machata35225e42017-09-02 23:49:22 +02004493 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004494 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4495 if (!rif)
4496 return 0;
4497 mlxsw_sp_nexthop_rif_init(nh, rif);
4498
4499 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4500 if (err)
4501 goto err_nexthop_neigh_init;
4502
4503 return 0;
4504
4505err_nexthop_neigh_init:
4506 mlxsw_sp_nexthop_rif_fini(nh);
4507 return err;
4508}
4509
Petr Machata35225e42017-09-02 23:49:22 +02004510static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4511 struct mlxsw_sp_nexthop *nh)
4512{
4513 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4514}
4515
4516static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4517 struct mlxsw_sp_nexthop_group *nh_grp,
4518 struct mlxsw_sp_nexthop *nh,
4519 const struct rt6_info *rt)
4520{
4521 struct net_device *dev = rt->dst.dev;
4522
4523 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004524 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004525 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004526 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004527
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004528 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4529
Petr Machata35225e42017-09-02 23:49:22 +02004530 if (!dev)
4531 return 0;
4532 nh->ifindex = dev->ifindex;
4533
4534 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4535}
4536
Ido Schimmel428b8512017-08-03 13:28:28 +02004537static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4538 struct mlxsw_sp_nexthop *nh)
4539{
Petr Machata35225e42017-09-02 23:49:22 +02004540 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004541 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004542 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004543}
4544
Petr Machataf6050ee2017-09-02 23:49:21 +02004545static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4546 const struct rt6_info *rt)
4547{
Petr Machata8f28a302017-09-02 23:49:24 +02004548 return rt->rt6i_flags & RTF_GATEWAY ||
4549 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004550}
4551
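/* Create a nexthop group for an IPv6 entry. The group is sized according
 * to the number of routes currently linked to the entry and inserted into
 * the router's nexthop group table, which allows identical groups to be
 * shared by multiple entries via mlxsw_sp_nexthop6_group_get().
 */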
Ido Schimmel428b8512017-08-03 13:28:28 +02004552static struct mlxsw_sp_nexthop_group *
4553mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4554 struct mlxsw_sp_fib6_entry *fib6_entry)
4555{
4556 struct mlxsw_sp_nexthop_group *nh_grp;
4557 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4558 struct mlxsw_sp_nexthop *nh;
4559 size_t alloc_size;
4560 int i = 0;
4561 int err;
4562
4563 alloc_size = sizeof(*nh_grp) +
4564 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4565 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4566 if (!nh_grp)
4567 return ERR_PTR(-ENOMEM);
4568 INIT_LIST_HEAD(&nh_grp->fib_list);
4569#if IS_ENABLED(CONFIG_IPV6)
4570 nh_grp->neigh_tbl = &nd_tbl;
4571#endif
4572 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4573 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004574 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004575 nh_grp->count = fib6_entry->nrt6;
4576 for (i = 0; i < nh_grp->count; i++) {
4577 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4578
4579 nh = &nh_grp->nexthops[i];
4580 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4581 if (err)
4582 goto err_nexthop6_init;
4583 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4584 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004585
4586 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4587 if (err)
4588 goto err_nexthop_group_insert;
4589
Ido Schimmel428b8512017-08-03 13:28:28 +02004590 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4591 return nh_grp;
4592
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004593err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004594err_nexthop6_init:
4595 for (i--; i >= 0; i--) {
4596 nh = &nh_grp->nexthops[i];
4597 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4598 }
4599 kfree(nh_grp);
4600 return ERR_PTR(err);
4601}
4602
4603static void
4604mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4605 struct mlxsw_sp_nexthop_group *nh_grp)
4606{
4607 struct mlxsw_sp_nexthop *nh;
4608 int i = nh_grp->count;
4609
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004610 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004611 for (i--; i >= 0; i--) {
4612 nh = &nh_grp->nexthops[i];
4613 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4614 }
4615 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4616 WARN_ON(nh_grp->adj_index_valid);
4617 kfree(nh_grp);
4618}
4619
4620static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4621 struct mlxsw_sp_fib6_entry *fib6_entry)
4622{
4623 struct mlxsw_sp_nexthop_group *nh_grp;
4624
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004625 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4626 if (!nh_grp) {
4627 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4628 if (IS_ERR(nh_grp))
4629 return PTR_ERR(nh_grp);
4630 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004631
4632 list_add_tail(&fib6_entry->common.nexthop_group_node,
4633 &nh_grp->fib_list);
4634 fib6_entry->common.nh_group = nh_grp;
4635
4636 return 0;
4637}
4638
4639static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4640 struct mlxsw_sp_fib_entry *fib_entry)
4641{
4642 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4643
4644 list_del(&fib_entry->nexthop_group_node);
4645 if (!list_empty(&nh_grp->fib_list))
4646 return;
4647 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4648}
4649
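/* Switch an IPv6 entry to a nexthop group that matches its current set of
 * routes: detach the entry from its old group, get (or create) the new
 * group, reprogram the entry in the device and destroy the old group if
 * it is no longer referenced.
 */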
4650static int
4651mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4652 struct mlxsw_sp_fib6_entry *fib6_entry)
4653{
4654 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4655 int err;
4656
4657 fib6_entry->common.nh_group = NULL;
4658 list_del(&fib6_entry->common.nexthop_group_node);
4659
4660 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4661 if (err)
4662 goto err_nexthop6_group_get;
4663
4664 /* If this entry is offloaded, then the adjacency index
4665 * currently associated with it in the device's table is that
4666 * of the old group. Start using the new one instead.
4667 */
4668 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4669 if (err)
4670 goto err_fib_node_entry_add;
4671
4672 if (list_empty(&old_nh_grp->fib_list))
4673 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4674
4675 return 0;
4676
4677err_fib_node_entry_add:
4678 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4679err_nexthop6_group_get:
4680 list_add_tail(&fib6_entry->common.nexthop_group_node,
4681 &old_nh_grp->fib_list);
4682 fib6_entry->common.nh_group = old_nh_grp;
4683 return err;
4684}
4685
4686static int
4687mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4688 struct mlxsw_sp_fib6_entry *fib6_entry,
4689 struct rt6_info *rt)
4690{
4691 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4692 int err;
4693
4694 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4695 if (IS_ERR(mlxsw_sp_rt6))
4696 return PTR_ERR(mlxsw_sp_rt6);
4697
4698 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4699 fib6_entry->nrt6++;
4700
4701 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4702 if (err)
4703 goto err_nexthop6_group_update;
4704
4705 return 0;
4706
4707err_nexthop6_group_update:
4708 fib6_entry->nrt6--;
4709 list_del(&mlxsw_sp_rt6->list);
4710 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4711 return err;
4712}
4713
4714static void
4715mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4716 struct mlxsw_sp_fib6_entry *fib6_entry,
4717 struct rt6_info *rt)
4718{
4719 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4720
4721 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4722 if (WARN_ON(!mlxsw_sp_rt6))
4723 return;
4724
4725 fib6_entry->nrt6--;
4726 list_del(&mlxsw_sp_rt6->list);
4727 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4728 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4729}
4730
Petr Machataf6050ee2017-09-02 23:49:21 +02004731static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4732 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004733 const struct rt6_info *rt)
4734{
4735 /* Packets hitting RTF_REJECT routes need to be discarded by the
4736 * stack. We can rely on their destination device not having a
4737 * RIF (it's the loopback device) and can thus use action type
4738 * local, which will cause them to be trapped with a lower
4739 * priority than packets that need to be locally received.
4740 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004741 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004742 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4743 else if (rt->rt6i_flags & RTF_REJECT)
4744 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004745 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004746 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4747 else
4748 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4749}
4750
4751static void
4752mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4753{
4754 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4755
4756 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4757 list) {
4758 fib6_entry->nrt6--;
4759 list_del(&mlxsw_sp_rt6->list);
4760 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4761 }
4762}
4763
4764static struct mlxsw_sp_fib6_entry *
4765mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4766 struct mlxsw_sp_fib_node *fib_node,
4767 struct rt6_info *rt)
4768{
4769 struct mlxsw_sp_fib6_entry *fib6_entry;
4770 struct mlxsw_sp_fib_entry *fib_entry;
4771 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4772 int err;
4773
4774 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4775 if (!fib6_entry)
4776 return ERR_PTR(-ENOMEM);
4777 fib_entry = &fib6_entry->common;
4778
4779 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4780 if (IS_ERR(mlxsw_sp_rt6)) {
4781 err = PTR_ERR(mlxsw_sp_rt6);
4782 goto err_rt6_create;
4783 }
4784
Petr Machataf6050ee2017-09-02 23:49:21 +02004785 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004786
4787 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4788 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4789 fib6_entry->nrt6 = 1;
4790 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4791 if (err)
4792 goto err_nexthop6_group_get;
4793
4794 fib_entry->fib_node = fib_node;
4795
4796 return fib6_entry;
4797
4798err_nexthop6_group_get:
4799 list_del(&mlxsw_sp_rt6->list);
4800 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4801err_rt6_create:
4802 kfree(fib6_entry);
4803 return ERR_PTR(err);
4804}
4805
4806static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4807 struct mlxsw_sp_fib6_entry *fib6_entry)
4808{
4809 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4810 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4811 WARN_ON(fib6_entry->nrt6);
4812 kfree(fib6_entry);
4813}
4814
4815static struct mlxsw_sp_fib6_entry *
4816mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004817 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004818{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004819 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004820
4821 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4822 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4823
4824 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4825 continue;
4826 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4827 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004828 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4829 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4830 mlxsw_sp_fib6_rt_can_mp(nrt))
4831 return fib6_entry;
4832 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4833 fallback = fallback ?: fib6_entry;
4834 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004835 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004836 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004837 }
4838
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004839 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004840}
4841
4842static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004843mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4844 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004845{
4846 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4847 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4848 struct mlxsw_sp_fib6_entry *fib6_entry;
4849
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004850 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4851
4852 if (replace && WARN_ON(!fib6_entry))
4853 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004854
4855 if (fib6_entry) {
4856 list_add_tail(&new6_entry->common.list,
4857 &fib6_entry->common.list);
4858 } else {
4859 struct mlxsw_sp_fib6_entry *last;
4860
4861 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4862 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4863
4864 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4865 break;
4866 fib6_entry = last;
4867 }
4868
4869 if (fib6_entry)
4870 list_add(&new6_entry->common.list,
4871 &fib6_entry->common.list);
4872 else
4873 list_add(&new6_entry->common.list,
4874 &fib_node->entry_list);
4875 }
4876
4877 return 0;
4878}
4879
4880static void
4881mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4882{
4883 list_del(&fib6_entry->common.list);
4884}
4885
4886static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004887 struct mlxsw_sp_fib6_entry *fib6_entry,
4888 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004889{
4890 int err;
4891
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004892 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004893 if (err)
4894 return err;
4895
4896 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4897 if (err)
4898 goto err_fib_node_entry_add;
4899
4900 return 0;
4901
4902err_fib_node_entry_add:
4903 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4904 return err;
4905}
4906
4907static void
4908mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4909 struct mlxsw_sp_fib6_entry *fib6_entry)
4910{
4911 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4912 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4913}
4914
4915static struct mlxsw_sp_fib6_entry *
4916mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4917 const struct rt6_info *rt)
4918{
4919 struct mlxsw_sp_fib6_entry *fib6_entry;
4920 struct mlxsw_sp_fib_node *fib_node;
4921 struct mlxsw_sp_fib *fib;
4922 struct mlxsw_sp_vr *vr;
4923
4924 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4925 if (!vr)
4926 return NULL;
4927 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4928
4929 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4930 sizeof(rt->rt6i_dst.addr),
4931 rt->rt6i_dst.plen);
4932 if (!fib_node)
4933 return NULL;
4934
4935 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4936 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4937
4938 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4939 rt->rt6i_metric == iter_rt->rt6i_metric &&
4940 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4941 return fib6_entry;
4942 }
4943
4944 return NULL;
4945}
4946
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004947static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4948 struct mlxsw_sp_fib6_entry *fib6_entry,
4949 bool replace)
4950{
4951 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4952 struct mlxsw_sp_fib6_entry *replaced;
4953
4954 if (!replace)
4955 return;
4956
4957 replaced = list_next_entry(fib6_entry, common.list);
4958
4959 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4960 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4961 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4962}
4963
Ido Schimmel428b8512017-08-03 13:28:28 +02004964static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004965 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004966{
4967 struct mlxsw_sp_fib6_entry *fib6_entry;
4968 struct mlxsw_sp_fib_node *fib_node;
4969 int err;
4970
4971 if (mlxsw_sp->router->aborted)
4972 return 0;
4973
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004974 if (rt->rt6i_src.plen)
4975 return -EINVAL;
4976
Ido Schimmel428b8512017-08-03 13:28:28 +02004977 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4978 return 0;
4979
4980 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4981 &rt->rt6i_dst.addr,
4982 sizeof(rt->rt6i_dst.addr),
4983 rt->rt6i_dst.plen,
4984 MLXSW_SP_L3_PROTO_IPV6);
4985 if (IS_ERR(fib_node))
4986 return PTR_ERR(fib_node);
4987
4988 /* Before creating a new entry, try to append the route to an
4989 * existing multipath entry.
4990 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004991 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004992 if (fib6_entry) {
4993 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4994 if (err)
4995 goto err_fib6_entry_nexthop_add;
4996 return 0;
4997 }
4998
4999 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5000 if (IS_ERR(fib6_entry)) {
5001 err = PTR_ERR(fib6_entry);
5002 goto err_fib6_entry_create;
5003 }
5004
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005005 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005006 if (err)
5007 goto err_fib6_node_entry_link;
5008
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005009 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5010
Ido Schimmel428b8512017-08-03 13:28:28 +02005011 return 0;
5012
5013err_fib6_node_entry_link:
5014 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5015err_fib6_entry_create:
5016err_fib6_entry_nexthop_add:
5017 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5018 return err;
5019}
5020
5021static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5022 struct rt6_info *rt)
5023{
5024 struct mlxsw_sp_fib6_entry *fib6_entry;
5025 struct mlxsw_sp_fib_node *fib_node;
5026
5027 if (mlxsw_sp->router->aborted)
5028 return;
5029
5030 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5031 return;
5032
5033 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5034 if (WARN_ON(!fib6_entry))
5035 return;
5036
5037 /* If the route is part of a multipath entry, but not the last one
5038 * removed, then only remove it from the entry's nexthop group.
5039 */
5040 if (!list_is_singular(&fib6_entry->rt6_list)) {
5041 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5042 return;
5043 }
5044
5045 fib_node = fib6_entry->common.fib_node;
5046
5047 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5048 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5049 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5050}
5051
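/* On abort, an LPM tree containing only a default (/0) route is bound to
 * every virtual router, with the route's action set to send packets to
 * the CPU so that forwarding is performed by the kernel instead of the
 * device.
 */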
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005052static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5053 enum mlxsw_reg_ralxx_protocol proto,
5054 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005055{
5056 char ralta_pl[MLXSW_REG_RALTA_LEN];
5057 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005058 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005059
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005060 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005061 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5062 if (err)
5063 return err;
5064
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005065 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005066 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5067 if (err)
5068 return err;
5069
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005070 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005071 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005072 char raltb_pl[MLXSW_REG_RALTB_LEN];
5073 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005074
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005075 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005076 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5077 raltb_pl);
5078 if (err)
5079 return err;
5080
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005081 mlxsw_reg_ralue_pack(ralue_pl, proto,
5082 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005083 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5084 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5085 ralue_pl);
5086 if (err)
5087 return err;
5088 }
5089
5090 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005091}
5092
Yotam Gigid42b0962017-09-27 08:23:20 +02005093static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5094 struct mfc_entry_notifier_info *men_info,
5095 bool replace)
5096{
5097 struct mlxsw_sp_vr *vr;
5098
5099 if (mlxsw_sp->router->aborted)
5100 return 0;
5101
David Ahernf8fa9b42017-10-18 09:56:56 -07005102 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005103 if (IS_ERR(vr))
5104 return PTR_ERR(vr);
5105
5106 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5107}
5108
5109static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5110 struct mfc_entry_notifier_info *men_info)
5111{
5112 struct mlxsw_sp_vr *vr;
5113
5114 if (mlxsw_sp->router->aborted)
5115 return;
5116
5117 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5118 if (WARN_ON(!vr))
5119 return;
5120
5121 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5122 mlxsw_sp_vr_put(vr);
5123}
5124
5125static int
5126mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5127 struct vif_entry_notifier_info *ven_info)
5128{
5129 struct mlxsw_sp_rif *rif;
5130 struct mlxsw_sp_vr *vr;
5131
5132 if (mlxsw_sp->router->aborted)
5133 return 0;
5134
David Ahernf8fa9b42017-10-18 09:56:56 -07005135 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005136 if (IS_ERR(vr))
5137 return PTR_ERR(vr);
5138
5139 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5140 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5141 ven_info->vif_index,
5142 ven_info->vif_flags, rif);
5143}
5144
5145static void
5146mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5147 struct vif_entry_notifier_info *ven_info)
5148{
5149 struct mlxsw_sp_vr *vr;
5150
5151 if (mlxsw_sp->router->aborted)
5152 return;
5153
5154 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5155 if (WARN_ON(!vr))
5156 return;
5157
5158 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5159 mlxsw_sp_vr_put(vr);
5160}
5161
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005162static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5163{
5164 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5165 int err;
5166
5167 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5168 MLXSW_SP_LPM_TREE_MIN);
5169 if (err)
5170 return err;
5171
Yotam Gigid42b0962017-09-27 08:23:20 +02005172 /* The multicast router code does not need an abort trap since, by
5173 * default, packets that don't match any routes are trapped to the CPU.
5174 */
5175
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005176 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5177 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5178 MLXSW_SP_LPM_TREE_MIN + 1);
5179}
5180
Ido Schimmel9aecce12017-02-09 10:28:42 +01005181static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5182 struct mlxsw_sp_fib_node *fib_node)
5183{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005184 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005185
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005186 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5187 common.list) {
5188 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005189
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005190 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5191 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005192 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005193 /* Break when the entry list is empty and the node was freed.
5194 * Otherwise, we'll access freed memory in the next
5195 * iteration.
5196 */
5197 if (do_break)
5198 break;
5199 }
5200}
5201
Ido Schimmel428b8512017-08-03 13:28:28 +02005202static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5203 struct mlxsw_sp_fib_node *fib_node)
5204{
5205 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5206
5207 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5208 common.list) {
5209 bool do_break = &tmp->common.list == &fib_node->entry_list;
5210
5211 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5212 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5213 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5214 if (do_break)
5215 break;
5216 }
5217}
5218
Ido Schimmel9aecce12017-02-09 10:28:42 +01005219static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5220 struct mlxsw_sp_fib_node *fib_node)
5221{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005222 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005223 case MLXSW_SP_L3_PROTO_IPV4:
5224 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5225 break;
5226 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005227 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005228 break;
5229 }
5230}
5231
Ido Schimmel76610eb2017-03-10 08:53:41 +01005232static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5233 struct mlxsw_sp_vr *vr,
5234 enum mlxsw_sp_l3proto proto)
5235{
5236 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5237 struct mlxsw_sp_fib_node *fib_node, *tmp;
5238
5239 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5240 bool do_break = &tmp->list == &fib->node_list;
5241
5242 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5243 if (do_break)
5244 break;
5245 }
5246}
5247
Ido Schimmelac571de2016-11-14 11:26:32 +01005248static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005249{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005250 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005251
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005252 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005253 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005254
Ido Schimmel76610eb2017-03-10 08:53:41 +01005255 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005256 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005257
5258 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005259 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005260
5261 /* If the virtual router was only used for IPv4, then it's no
5262 * longer used.
5263 */
5264 if (!mlxsw_sp_vr_is_used(vr))
5265 continue;
5266 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005267 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005268}
5269
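/* Abort FIB offloading: flush all offloaded routes, install the trap
 * default routes and mark the router as aborted so that subsequent route
 * notifications are ignored.
 */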
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005270static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005271{
5272 int err;
5273
Ido Schimmel9011b672017-05-16 19:38:25 +02005274 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005275 return;
5276 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005277 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005278 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005279 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5280 if (err)
5281 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5282}
5283
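/* FIB notifications are delivered in atomic context, so the notifier
 * copies the relevant information into one of these work items and
 * processes it later under RTNL. The union holds the notifier info for
 * whichever event family is being handled.
 */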
Ido Schimmel30572242016-12-03 16:45:01 +01005284struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005285 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005286 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005287 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005288 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005289 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005290 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005291 struct mfc_entry_notifier_info men_info;
5292 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005293 };
Ido Schimmel30572242016-12-03 16:45:01 +01005294 struct mlxsw_sp *mlxsw_sp;
5295 unsigned long event;
5296};
5297
Ido Schimmel66a57632017-08-03 13:28:26 +02005298static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005299{
Ido Schimmel30572242016-12-03 16:45:01 +01005300 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005301 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005302 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005303 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005304 int err;
5305
Ido Schimmel30572242016-12-03 16:45:01 +01005306 /* Protect internal structures from changes */
5307 rtnl_lock();
5308 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005309 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005310 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005311 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005312 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005313 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5314 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005315 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005316 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005317 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005318 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005319 break;
5320 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005321 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5322 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005323 break;
David Ahern1f279232017-10-27 17:37:14 -07005324 case FIB_EVENT_RULE_ADD:
5325 /* If we get here, a rule was added that we do not support,
5326 * so just abort FIB offloading.
5327 */
5328 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005329 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005330 case FIB_EVENT_NH_ADD: /* fall through */
5331 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005332 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5333 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005334 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5335 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005336 }
Ido Schimmel30572242016-12-03 16:45:01 +01005337 rtnl_unlock();
5338 kfree(fib_work);
5339}
5340
Ido Schimmel66a57632017-08-03 13:28:26 +02005341static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5342{
Ido Schimmel583419f2017-08-03 13:28:27 +02005343 struct mlxsw_sp_fib_event_work *fib_work =
5344 container_of(work, struct mlxsw_sp_fib_event_work, work);
5345 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005346 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005347 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005348
5349 rtnl_lock();
5350 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005351 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005352 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005353 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005354 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005355 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005356 if (err)
5357 mlxsw_sp_router_fib_abort(mlxsw_sp);
5358 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5359 break;
5360 case FIB_EVENT_ENTRY_DEL:
5361 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5362 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5363 break;
David Ahern1f279232017-10-27 17:37:14 -07005364 case FIB_EVENT_RULE_ADD:
5365 /* If we get here, a rule was added that we do not support,
5366 * so just abort FIB offloading.
5367 */
5368 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005369 break;
5370 }
5371 rtnl_unlock();
5372 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005373}
5374
Yotam Gigid42b0962017-09-27 08:23:20 +02005375static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5376{
5377 struct mlxsw_sp_fib_event_work *fib_work =
5378 container_of(work, struct mlxsw_sp_fib_event_work, work);
5379 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005380 bool replace;
5381 int err;
5382
5383 rtnl_lock();
5384 switch (fib_work->event) {
5385 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5386 case FIB_EVENT_ENTRY_ADD:
5387 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5388
5389 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5390 replace);
5391 if (err)
5392 mlxsw_sp_router_fib_abort(mlxsw_sp);
5393 ipmr_cache_put(fib_work->men_info.mfc);
5394 break;
5395 case FIB_EVENT_ENTRY_DEL:
5396 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5397 ipmr_cache_put(fib_work->men_info.mfc);
5398 break;
5399 case FIB_EVENT_VIF_ADD:
5400 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5401 &fib_work->ven_info);
5402 if (err)
5403 mlxsw_sp_router_fib_abort(mlxsw_sp);
5404 dev_put(fib_work->ven_info.dev);
5405 break;
5406 case FIB_EVENT_VIF_DEL:
5407 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5408 &fib_work->ven_info);
5409 dev_put(fib_work->ven_info.dev);
5410 break;
David Ahern1f279232017-10-27 17:37:14 -07005411 case FIB_EVENT_RULE_ADD:
5412 /* If we get here, a rule was added that we do not support,
5413 * so just abort FIB offloading.
5414 */
5415 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005416 break;
5417 }
5418 rtnl_unlock();
5419 kfree(fib_work);
5420}
5421
Ido Schimmel66a57632017-08-03 13:28:26 +02005422static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5423 struct fib_notifier_info *info)
5424{
David Ahern3c75f9b2017-10-18 15:01:38 -07005425 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005426 struct fib_nh_notifier_info *fnh_info;
5427
Ido Schimmel66a57632017-08-03 13:28:26 +02005428 switch (fib_work->event) {
5429 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5430 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5431 case FIB_EVENT_ENTRY_ADD: /* fall through */
5432 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005433 fen_info = container_of(info, struct fib_entry_notifier_info,
5434 info);
5435 fib_work->fen_info = *fen_info;
5436 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005437 * freed while work is queued. Release it afterwards.
5438 */
5439 fib_info_hold(fib_work->fen_info.fi);
5440 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005441 case FIB_EVENT_NH_ADD: /* fall through */
5442 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005443 fnh_info = container_of(info, struct fib_nh_notifier_info,
5444 info);
5445 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005446 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5447 break;
5448 }
5449}
5450
5451static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5452 struct fib_notifier_info *info)
5453{
David Ahern3c75f9b2017-10-18 15:01:38 -07005454 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005455
Ido Schimmel583419f2017-08-03 13:28:27 +02005456 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005457 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005458 case FIB_EVENT_ENTRY_ADD: /* fall through */
5459 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005460 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5461 info);
5462 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005463 rt6_hold(fib_work->fen6_info.rt);
5464 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005465 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005466}
5467
Yotam Gigid42b0962017-09-27 08:23:20 +02005468static void
5469mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5470 struct fib_notifier_info *info)
5471{
5472 switch (fib_work->event) {
5473 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5474 case FIB_EVENT_ENTRY_ADD: /* fall through */
5475 case FIB_EVENT_ENTRY_DEL:
5476 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5477 ipmr_cache_hold(fib_work->men_info.mfc);
5478 break;
5479 case FIB_EVENT_VIF_ADD: /* fall through */
5480 case FIB_EVENT_VIF_DEL:
5481 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5482 dev_hold(fib_work->ven_info.dev);
5483 break;
David Ahern1f279232017-10-27 17:37:14 -07005484 }
5485}
5486
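/* FIB rule events are checked synchronously. Only default rules and l3mdev
 * rules are supported; any other rule sets an extack message and returns an
 * error, which makes the notifier queue work that aborts FIB offloading.
 */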
5487static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5488 struct fib_notifier_info *info,
5489 struct mlxsw_sp *mlxsw_sp)
5490{
5491 struct netlink_ext_ack *extack = info->extack;
5492 struct fib_rule_notifier_info *fr_info;
5493 struct fib_rule *rule;
5494 int err = 0;
5495
5496 /* nothing to do at the moment */
5497 if (event == FIB_EVENT_RULE_DEL)
5498 return 0;
5499
5500 if (mlxsw_sp->router->aborted)
5501 return 0;
5502
5503 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5504 rule = fr_info->rule;
5505
5506 switch (info->family) {
5507 case AF_INET:
5508 if (!fib4_rule_default(rule) && !rule->l3mdev)
5509 err = -1;
5510 break;
5511 case AF_INET6:
5512 if (!fib6_rule_default(rule) && !rule->l3mdev)
5513 err = -1;
5514 break;
5515 case RTNL_FAMILY_IPMR:
5516 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5517 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005518 break;
5519 }
David Ahern1f279232017-10-27 17:37:14 -07005520
5521 if (err < 0)
5522 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5523
5524 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005525}
5526
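/* Main FIB notifier callback. It runs in an atomic context, so apart from
 * the synchronous rule check below, each event is copied into a work item
 * (together with any references it needs) and processed later under RTNL.
 */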
Ido Schimmel30572242016-12-03 16:45:01 +01005527/* Called with rcu_read_lock() */
5528static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5529 unsigned long event, void *ptr)
5530{
Ido Schimmel30572242016-12-03 16:45:01 +01005531 struct mlxsw_sp_fib_event_work *fib_work;
5532 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005533 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005534 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005535
Ido Schimmel8e29f972017-09-15 15:31:07 +02005536 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005537 (info->family != AF_INET && info->family != AF_INET6 &&
5538 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005539 return NOTIFY_DONE;
5540
David Ahern1f279232017-10-27 17:37:14 -07005541 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5542
5543 switch (event) {
5544 case FIB_EVENT_RULE_ADD: /* fall through */
5545 case FIB_EVENT_RULE_DEL:
5546 err = mlxsw_sp_router_fib_rule_event(event, info,
5547 router->mlxsw_sp);
5548 if (!err)
5549 return NOTIFY_DONE;
5550 }
5551
Ido Schimmel30572242016-12-03 16:45:01 +01005552 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5553 if (WARN_ON(!fib_work))
5554 return NOTIFY_BAD;
5555
Ido Schimmel7e39d112017-05-16 19:38:28 +02005556 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005557 fib_work->event = event;
5558
Ido Schimmel66a57632017-08-03 13:28:26 +02005559 switch (info->family) {
5560 case AF_INET:
5561 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5562 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005563 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005564 case AF_INET6:
5565 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5566 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005567 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005568 case RTNL_FAMILY_IPMR:
5569 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5570 mlxsw_sp_router_fibmr_event(fib_work, info);
5571 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005572 }
5573
Ido Schimmela0e47612017-02-06 16:20:10 +01005574 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005575
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005576 return NOTIFY_DONE;
5577}
5578
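/* Look up the RIF (router interface) associated with a netdevice by
 * linearly walking the RIF array, which is sized by the MAX_RIFS resource.
 */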
Ido Schimmel4724ba562017-03-10 08:53:39 +01005579static struct mlxsw_sp_rif *
5580mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5581 const struct net_device *dev)
5582{
5583 int i;
5584
5585 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005586 if (mlxsw_sp->router->rifs[i] &&
5587 mlxsw_sp->router->rifs[i]->dev == dev)
5588 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005589
5590 return NULL;
5591}
5592
5593static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5594{
5595 char ritr_pl[MLXSW_REG_RITR_LEN];
5596 int err;
5597
5598 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5599 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5600 if (WARN_ON_ONCE(err))
5601 return err;
5602
5603 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5604 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5605}
5606
5607static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005608 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005609{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005610 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5611 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5612 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005613}
5614
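/* Decide whether an address event should change the RIF configuration. On
 * NETDEV_UP a RIF is needed only if none exists yet. On NETDEV_DOWN the RIF
 * is removed only if the netdevice no longer has any IPv4 or IPv6 addresses
 * and is not an L3 (VRF) slave.
 */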
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005615static bool
5616mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5617 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005618{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005619 struct inet6_dev *inet6_dev;
5620 bool addr_list_empty = true;
5621 struct in_device *idev;
5622
Ido Schimmel4724ba562017-03-10 08:53:39 +01005623 switch (event) {
5624 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005625 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005626 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005627 idev = __in_dev_get_rtnl(dev);
5628 if (idev && idev->ifa_list)
5629 addr_list_empty = false;
5630
5631 inet6_dev = __in6_dev_get(dev);
5632 if (addr_list_empty && inet6_dev &&
5633 !list_empty(&inet6_dev->addr_list))
5634 addr_list_empty = false;
5635
5636 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005637 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005638 return true;
5639 /* It is possible we already removed the RIF ourselves
5640 * if it was assigned to a netdev that is now a bridge
5641 * or LAG slave.
5642 */
5643 return false;
5644 }
5645
5646 return false;
5647}
5648
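/* Derive the RIF type for a netdevice. IP-in-IP netdevices get a loopback
 * RIF; otherwise the type follows the underlying FID type: 802.1Q for VLAN
 * uppers of bridges and for VLAN-aware bridges, 802.1D for VLAN-unaware
 * bridges and rFID for everything else.
 */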
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005649static enum mlxsw_sp_rif_type
5650mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5651 const struct net_device *dev)
5652{
5653 enum mlxsw_sp_fid_type type;
5654
Petr Machata6ddb7422017-09-02 23:49:19 +02005655 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5656 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5657
5658 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005659 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5660 type = MLXSW_SP_FID_TYPE_8021Q;
5661 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5662 type = MLXSW_SP_FID_TYPE_8021Q;
5663 else if (netif_is_bridge_master(dev))
5664 type = MLXSW_SP_FID_TYPE_8021D;
5665 else
5666 type = MLXSW_SP_FID_TYPE_RFID;
5667
5668 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5669}
5670
Ido Schimmelde5ed992017-06-04 16:53:40 +02005671static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005672{
5673 int i;
5674
Ido Schimmelde5ed992017-06-04 16:53:40 +02005675 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5676 if (!mlxsw_sp->router->rifs[i]) {
5677 *p_rif_index = i;
5678 return 0;
5679 }
5680 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005681
Ido Schimmelde5ed992017-06-04 16:53:40 +02005682 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005683}
5684
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005685static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5686 u16 vr_id,
5687 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005688{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005689 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005690
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005691 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005692 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005693 return NULL;
5694
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005695 INIT_LIST_HEAD(&rif->nexthop_list);
5696 INIT_LIST_HEAD(&rif->neigh_list);
5697 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5698 rif->mtu = l3_dev->mtu;
5699 rif->vr_id = vr_id;
5700 rif->dev = l3_dev;
5701 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005702
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005703 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005704}
5705
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005706struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5707 u16 rif_index)
5708{
5709 return mlxsw_sp->router->rifs[rif_index];
5710}
5711
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005712u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5713{
5714 return rif->rif_index;
5715}
5716
Petr Machata92107cf2017-09-02 23:49:28 +02005717u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5718{
5719 return lb_rif->common.rif_index;
5720}
5721
5722u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5723{
5724 return lb_rif->ul_vr_id;
5725}
5726
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005727int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5728{
5729 return rif->dev->ifindex;
5730}
5731
Yotam Gigi91e4d592017-09-19 10:00:19 +02005732const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5733{
5734 return rif->dev;
5735}
5736
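/* Create a RIF for a netdevice. The sequence is: resolve the RIF type and
 * its ops, take a reference on the virtual router bound to the netdevice's
 * FIB table, allocate a free RIF index, allocate the RIF itself, get a FID
 * when the type uses one, run the optional type-specific setup, configure
 * the hardware, register the RIF with the multicast router and allocate
 * counters. Errors unwind these steps in reverse order.
 */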
Ido Schimmel4724ba562017-03-10 08:53:39 +01005737static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005738mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005739 const struct mlxsw_sp_rif_params *params,
5740 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005741{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005742 u32 tb_id = l3mdev_fib_table(params->dev);
5743 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005744 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005745 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005746 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005747 struct mlxsw_sp_vr *vr;
5748 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005749 int err;
5750
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005751 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5752 ops = mlxsw_sp->router->rif_ops_arr[type];
5753
David Ahernf8fa9b42017-10-18 09:56:56 -07005754 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005755 if (IS_ERR(vr))
5756 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005757 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005758
Ido Schimmelde5ed992017-06-04 16:53:40 +02005759 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005760 if (err) {
5761 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005762 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005763 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005764
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005765 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005766 if (!rif) {
5767 err = -ENOMEM;
5768 goto err_rif_alloc;
5769 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005770 rif->mlxsw_sp = mlxsw_sp;
5771 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005772
Petr Machata010cadf2017-09-02 23:49:18 +02005773 if (ops->fid_get) {
5774 fid = ops->fid_get(rif);
5775 if (IS_ERR(fid)) {
5776 err = PTR_ERR(fid);
5777 goto err_fid_get;
5778 }
5779 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005780 }
5781
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005782 if (ops->setup)
5783 ops->setup(rif, params);
5784
5785 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005786 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005787 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005788
Yotam Gigid42b0962017-09-27 08:23:20 +02005789 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5790 if (err)
5791 goto err_mr_rif_add;
5792
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005793 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005794 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005795
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005796 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005797
Yotam Gigid42b0962017-09-27 08:23:20 +02005798err_mr_rif_add:
5799 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005800err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005801 if (fid)
5802 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005803err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005804 kfree(rif);
5805err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005806err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005807 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005808 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005809 return ERR_PTR(err);
5810}
5811
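/* Destroy a RIF, tearing down the mlxsw_sp_rif_create() steps in reverse:
 * flush nexthops and neighbours using the RIF, free counters, unregister it
 * from the multicast router, deconfigure the hardware, release the FID (if
 * any) and drop the virtual router reference.
 */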
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005812void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005813{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005814 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5815 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005816 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005817 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005818
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005819 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005820 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005821
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005822 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005823 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005824 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005825 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005826 if (fid)
5827 /* Loopback RIFs are not associated with a FID. */
5828 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005829 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005830 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005831 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005832}
5833
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005834static void
5835mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5836 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5837{
5838 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5839
5840 params->vid = mlxsw_sp_port_vlan->vid;
5841 params->lag = mlxsw_sp_port->lagged;
5842 if (params->lag)
5843 params->lag_id = mlxsw_sp_port->lag_id;
5844 else
5845 params->system_port = mlxsw_sp_port->local_port;
5846}
5847
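/* Join a {port, VLAN} to the router: find or create the sub-port RIF for
 * the L3 netdevice, take a reference on its rFID, map the {port, VID} to
 * that FID, and disable learning while forcing the VID into the forwarding
 * STP state.
 */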
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005848static int
Ido Schimmela1107482017-05-26 08:37:39 +02005849mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005850 struct net_device *l3_dev,
5851 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005852{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005853 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005855 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005856 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005857 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005858 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005859
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005860 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005861 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005862 struct mlxsw_sp_rif_params params = {
5863 .dev = l3_dev,
5864 };
5865
5866 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005867 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005868 if (IS_ERR(rif))
5869 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005870 }
5871
Ido Schimmela1107482017-05-26 08:37:39 +02005872 /* The FID was already created; just take a reference. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005873 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005874 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5875 if (err)
5876 goto err_fid_port_vid_map;
5877
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005878 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005879 if (err)
5880 goto err_port_vid_learning_set;
5881
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005882 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005883 BR_STATE_FORWARDING);
5884 if (err)
5885 goto err_port_vid_stp_set;
5886
Ido Schimmela1107482017-05-26 08:37:39 +02005887 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005888
Ido Schimmel4724ba562017-03-10 08:53:39 +01005889 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005890
5891err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005892 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005893err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005894 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5895err_fid_port_vid_map:
5896 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005897 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005898}
5899
Ido Schimmela1107482017-05-26 08:37:39 +02005900void
5901mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005902{
Ido Schimmelce95e152017-05-26 08:37:27 +02005903 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005904 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005905 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005906
Ido Schimmela1107482017-05-26 08:37:39 +02005907 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5908 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005909
Ido Schimmela1107482017-05-26 08:37:39 +02005910 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005911 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5912 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005913 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5914 /* If router port holds the last reference on the rFID, then the
5915 * associated Sub-port RIF will be destroyed.
5916 */
5917 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005918}
5919
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005920static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5921 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005922 unsigned long event, u16 vid,
5923 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005924{
5925 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005926 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005927
Ido Schimmelce95e152017-05-26 08:37:27 +02005928 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005929 if (WARN_ON(!mlxsw_sp_port_vlan))
5930 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005931
5932 switch (event) {
5933 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005934 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005935 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005936 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005937 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005938 break;
5939 }
5940
5941 return 0;
5942}
5943
5944static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005945 unsigned long event,
5946 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005947{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005948 if (netif_is_bridge_port(port_dev) ||
5949 netif_is_lag_port(port_dev) ||
5950 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005951 return 0;
5952
David Ahernf8fa9b42017-10-18 09:56:56 -07005953 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5954 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005955}
5956
5957static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5958 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005959 unsigned long event, u16 vid,
5960 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005961{
5962 struct net_device *port_dev;
5963 struct list_head *iter;
5964 int err;
5965
5966 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5967 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005968 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
5969 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005970 event, vid,
5971 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005972 if (err)
5973 return err;
5974 }
5975 }
5976
5977 return 0;
5978}
5979
5980static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005981 unsigned long event,
5982 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005983{
5984 if (netif_is_bridge_port(lag_dev))
5985 return 0;
5986
David Ahernf8fa9b42017-10-18 09:56:56 -07005987 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
5988 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005989}
5990
Ido Schimmel4724ba562017-03-10 08:53:39 +01005991static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005992 unsigned long event,
5993 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005994{
5995 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005996 struct mlxsw_sp_rif_params params = {
5997 .dev = l3_dev,
5998 };
Ido Schimmela1107482017-05-26 08:37:39 +02005999 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006000
6001 switch (event) {
6002 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006003 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006004 if (IS_ERR(rif))
6005 return PTR_ERR(rif);
6006 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006007 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006008 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006009 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006010 break;
6011 }
6012
6013 return 0;
6014}
6015
6016static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006017 unsigned long event,
6018 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006019{
6020 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006021 u16 vid = vlan_dev_vlan_id(vlan_dev);
6022
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006023 if (netif_is_bridge_port(vlan_dev))
6024 return 0;
6025
Ido Schimmel4724ba562017-03-10 08:53:39 +01006026 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006027 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006028 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006029 else if (netif_is_lag_master(real_dev))
6030 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006031 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006032 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006033 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006034
6035 return 0;
6036}
6037
Ido Schimmelb1e45522017-04-30 19:47:14 +03006038static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006039 unsigned long event,
6040 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006041{
6042 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006043 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006044 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006045 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006046 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006047 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006048 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006049 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006050 else
6051 return 0;
6052}
6053
Ido Schimmel4724ba562017-03-10 08:53:39 +01006054int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6055 unsigned long event, void *ptr)
6056{
6057 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6058 struct net_device *dev = ifa->ifa_dev->dev;
6059 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006060 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006061 int err = 0;
6062
David Ahern89d5dd22017-10-18 09:56:55 -07006063 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6064 if (event == NETDEV_UP)
6065 goto out;
6066
6067 mlxsw_sp = mlxsw_sp_lower_get(dev);
6068 if (!mlxsw_sp)
6069 goto out;
6070
6071 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6072 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6073 goto out;
6074
David Ahernf8fa9b42017-10-18 09:56:56 -07006075 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006076out:
6077 return notifier_from_errno(err);
6078}
6079
6080int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6081 unsigned long event, void *ptr)
6082{
6083 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6084 struct net_device *dev = ivi->ivi_dev->dev;
6085 struct mlxsw_sp *mlxsw_sp;
6086 struct mlxsw_sp_rif *rif;
6087 int err = 0;
6088
Ido Schimmel4724ba562017-03-10 08:53:39 +01006089 mlxsw_sp = mlxsw_sp_lower_get(dev);
6090 if (!mlxsw_sp)
6091 goto out;
6092
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006093 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006094 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006095 goto out;
6096
David Ahernf8fa9b42017-10-18 09:56:56 -07006097 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006098out:
6099 return notifier_from_errno(err);
6100}
6101
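/* IPv6 address notifications arrive in an atomic context, so the event is
 * recorded in a work item and handled later under RTNL by the same
 * __mlxsw_sp_inetaddr_event() helper used for IPv4.
 */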
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006102struct mlxsw_sp_inet6addr_event_work {
6103 struct work_struct work;
6104 struct net_device *dev;
6105 unsigned long event;
6106};
6107
6108static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6109{
6110 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6111 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6112 struct net_device *dev = inet6addr_work->dev;
6113 unsigned long event = inet6addr_work->event;
6114 struct mlxsw_sp *mlxsw_sp;
6115 struct mlxsw_sp_rif *rif;
6116
6117 rtnl_lock();
6118 mlxsw_sp = mlxsw_sp_lower_get(dev);
6119 if (!mlxsw_sp)
6120 goto out;
6121
6122 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6123 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6124 goto out;
6125
David Ahernf8fa9b42017-10-18 09:56:56 -07006126 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006127out:
6128 rtnl_unlock();
6129 dev_put(dev);
6130 kfree(inet6addr_work);
6131}
6132
6133/* Called with rcu_read_lock() */
6134int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6135 unsigned long event, void *ptr)
6136{
6137 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6138 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6139 struct net_device *dev = if6->idev->dev;
6140
David Ahern89d5dd22017-10-18 09:56:55 -07006141 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6142 if (event == NETDEV_UP)
6143 return NOTIFY_DONE;
6144
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006145 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6146 return NOTIFY_DONE;
6147
6148 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6149 if (!inet6addr_work)
6150 return NOTIFY_BAD;
6151
6152 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6153 inet6addr_work->dev = dev;
6154 inet6addr_work->event = event;
6155 dev_hold(dev);
6156 mlxsw_core_schedule_work(&inet6addr_work->work);
6157
6158 return NOTIFY_DONE;
6159}
6160
David Ahern89d5dd22017-10-18 09:56:55 -07006161int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6162 unsigned long event, void *ptr)
6163{
6164 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6165 struct net_device *dev = i6vi->i6vi_dev->dev;
6166 struct mlxsw_sp *mlxsw_sp;
6167 struct mlxsw_sp_rif *rif;
6168 int err = 0;
6169
6170 mlxsw_sp = mlxsw_sp_lower_get(dev);
6171 if (!mlxsw_sp)
6172 goto out;
6173
6174 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6175 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6176 goto out;
6177
David Ahernf8fa9b42017-10-18 09:56:56 -07006178 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006179out:
6180 return notifier_from_errno(err);
6181}
6182
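/* Update the MAC address and MTU of an existing RIF through the RITR
 * register. Used when the underlying netdevice changes, together with
 * updating the FDB entry and the multicast router's view of the MTU.
 */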
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006183static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006184 const char *mac, int mtu)
6185{
6186 char ritr_pl[MLXSW_REG_RITR_LEN];
6187 int err;
6188
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006189 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006190 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6191 if (err)
6192 return err;
6193
6194 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6195 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6196 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6197 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6198}
6199
6200int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6201{
6202 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006203 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006204 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006205 int err;
6206
6207 mlxsw_sp = mlxsw_sp_lower_get(dev);
6208 if (!mlxsw_sp)
6209 return 0;
6210
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006211 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6212 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006213 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006214 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006215
Ido Schimmela1107482017-05-26 08:37:39 +02006216 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006217 if (err)
6218 return err;
6219
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006220 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6221 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006222 if (err)
6223 goto err_rif_edit;
6224
Ido Schimmela1107482017-05-26 08:37:39 +02006225 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006226 if (err)
6227 goto err_rif_fdb_op;
6228
Yotam Gigifd890fe2017-09-27 08:23:21 +02006229 if (rif->mtu != dev->mtu) {
6230 struct mlxsw_sp_vr *vr;
6231
6232 /* The RIF is relevant only to its mr_table instance, as unlike
6233 * unicast routing, in multicast routing a RIF cannot be shared
6234 * between several multicast routing tables.
6235 */
6236 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6237 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6238 }
6239
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006240 ether_addr_copy(rif->addr, dev->dev_addr);
6241 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006242
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006243 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006244
6245 return 0;
6246
6247err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006248 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006249err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006250 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006251 return err;
6252}
6253
Ido Schimmelb1e45522017-04-30 19:47:14 +03006254static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006255 struct net_device *l3_dev,
6256 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006257{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006258 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006259
Ido Schimmelb1e45522017-04-30 19:47:14 +03006260 /* If netdev is already associated with a RIF, then we need to
6261 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006262 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006263 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6264 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006265 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006266
David Ahernf8fa9b42017-10-18 09:56:56 -07006267 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006268}
6269
Ido Schimmelb1e45522017-04-30 19:47:14 +03006270static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6271 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006272{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006273 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006274
Ido Schimmelb1e45522017-04-30 19:47:14 +03006275 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6276 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006277 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006278 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006279}
6280
Ido Schimmelb1e45522017-04-30 19:47:14 +03006281int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6282 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006283{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006284 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6285 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006286
Ido Schimmelb1e45522017-04-30 19:47:14 +03006287 if (!mlxsw_sp)
6288 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006289
Ido Schimmelb1e45522017-04-30 19:47:14 +03006290 switch (event) {
6291 case NETDEV_PRECHANGEUPPER:
6292 return 0;
6293 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006294 if (info->linking) {
6295 struct netlink_ext_ack *extack;
6296
6297 extack = netdev_notifier_info_to_extack(&info->info);
6298 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6299 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006300 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006301 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006302 break;
6303 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006304
Ido Schimmelb1e45522017-04-30 19:47:14 +03006305 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006306}
6307
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006308static struct mlxsw_sp_rif_subport *
6309mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006310{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006311 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006312}
6313
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006314static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6315 const struct mlxsw_sp_rif_params *params)
6316{
6317 struct mlxsw_sp_rif_subport *rif_subport;
6318
6319 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6320 rif_subport->vid = params->vid;
6321 rif_subport->lag = params->lag;
6322 if (params->lag)
6323 rif_subport->lag_id = params->lag_id;
6324 else
6325 rif_subport->system_port = params->system_port;
6326}
6327
6328static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6329{
6330 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6331 struct mlxsw_sp_rif_subport *rif_subport;
6332 char ritr_pl[MLXSW_REG_RITR_LEN];
6333
6334 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6335 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006336 rif->rif_index, rif->vr_id, rif->dev->mtu);
6337 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006338 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6339 rif_subport->lag ? rif_subport->lag_id :
6340 rif_subport->system_port,
6341 rif_subport->vid);
6342
6343 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6344}
6345
6346static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6347{
Petr Machata010cadf2017-09-02 23:49:18 +02006348 int err;
6349
6350 err = mlxsw_sp_rif_subport_op(rif, true);
6351 if (err)
6352 return err;
6353
6354 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6355 mlxsw_sp_fid_index(rif->fid), true);
6356 if (err)
6357 goto err_rif_fdb_op;
6358
6359 mlxsw_sp_fid_rif_set(rif->fid, rif);
6360 return 0;
6361
6362err_rif_fdb_op:
6363 mlxsw_sp_rif_subport_op(rif, false);
6364 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006365}
6366
6367static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6368{
Petr Machata010cadf2017-09-02 23:49:18 +02006369 struct mlxsw_sp_fid *fid = rif->fid;
6370
6371 mlxsw_sp_fid_rif_set(fid, NULL);
6372 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6373 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006374 mlxsw_sp_rif_subport_op(rif, false);
6375}
6376
6377static struct mlxsw_sp_fid *
6378mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6379{
6380 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6381}
6382
6383static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6384 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6385 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6386 .setup = mlxsw_sp_rif_subport_setup,
6387 .configure = mlxsw_sp_rif_subport_configure,
6388 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6389 .fid_get = mlxsw_sp_rif_subport_fid_get,
6390};
6391
6392static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6393 enum mlxsw_reg_ritr_if_type type,
6394 u16 vid_fid, bool enable)
6395{
6396 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6397 char ritr_pl[MLXSW_REG_RITR_LEN];
6398
6399 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006400 rif->dev->mtu);
6401 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006402 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6403
6404 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6405}
6406
Yotam Gigib35750f2017-10-09 11:15:33 +02006407u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006408{
6409 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6410}
6411
6412static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6413{
6414 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6415 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6416 int err;
6417
6418 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6419 if (err)
6420 return err;
6421
Ido Schimmel0d284812017-07-18 10:10:12 +02006422 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6423 mlxsw_sp_router_port(mlxsw_sp), true);
6424 if (err)
6425 goto err_fid_mc_flood_set;
6426
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006427 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6428 mlxsw_sp_router_port(mlxsw_sp), true);
6429 if (err)
6430 goto err_fid_bc_flood_set;
6431
Petr Machata010cadf2017-09-02 23:49:18 +02006432 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6433 mlxsw_sp_fid_index(rif->fid), true);
6434 if (err)
6435 goto err_rif_fdb_op;
6436
6437 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006438 return 0;
6439
Petr Machata010cadf2017-09-02 23:49:18 +02006440err_rif_fdb_op:
6441 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6442 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006443err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006444 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6445 mlxsw_sp_router_port(mlxsw_sp), false);
6446err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006447 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6448 return err;
6449}
6450
6451static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6452{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006453 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006454 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6455 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006456
Petr Machata010cadf2017-09-02 23:49:18 +02006457 mlxsw_sp_fid_rif_set(fid, NULL);
6458 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6459 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006460 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6461 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006462 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6463 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006464 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6465}
6466
6467static struct mlxsw_sp_fid *
6468mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6469{
6470 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6471
6472 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6473}
6474
6475static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6476 .type = MLXSW_SP_RIF_TYPE_VLAN,
6477 .rif_size = sizeof(struct mlxsw_sp_rif),
6478 .configure = mlxsw_sp_rif_vlan_configure,
6479 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6480 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6481};
6482
6483static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6484{
6485 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6486 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6487 int err;
6488
6489 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6490 true);
6491 if (err)
6492 return err;
6493
Ido Schimmel0d284812017-07-18 10:10:12 +02006494 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6495 mlxsw_sp_router_port(mlxsw_sp), true);
6496 if (err)
6497 goto err_fid_mc_flood_set;
6498
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006499 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6500 mlxsw_sp_router_port(mlxsw_sp), true);
6501 if (err)
6502 goto err_fid_bc_flood_set;
6503
Petr Machata010cadf2017-09-02 23:49:18 +02006504 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6505 mlxsw_sp_fid_index(rif->fid), true);
6506 if (err)
6507 goto err_rif_fdb_op;
6508
6509 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006510 return 0;
6511
Petr Machata010cadf2017-09-02 23:49:18 +02006512err_rif_fdb_op:
6513 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6514 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006515err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006516 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6517 mlxsw_sp_router_port(mlxsw_sp), false);
6518err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006519 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6520 return err;
6521}
6522
6523static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6524{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006525 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006526 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6527 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006528
Petr Machata010cadf2017-09-02 23:49:18 +02006529 mlxsw_sp_fid_rif_set(fid, NULL);
6530 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6531 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006532 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6533 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006534 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6535 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006536 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6537}
6538
6539static struct mlxsw_sp_fid *
6540mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6541{
6542 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6543}
6544
6545static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6546 .type = MLXSW_SP_RIF_TYPE_FID,
6547 .rif_size = sizeof(struct mlxsw_sp_rif),
6548 .configure = mlxsw_sp_rif_fid_configure,
6549 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6550 .fid_get = mlxsw_sp_rif_fid_fid_get,
6551};
6552
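/* Loopback RIFs back IP-in-IP tunnels: they bind the tunnel's underlay
 * virtual router and IPv4 source address. An IPv6 underlay is currently
 * rejected with -EAFNOSUPPORT.
 */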
Petr Machata6ddb7422017-09-02 23:49:19 +02006553static struct mlxsw_sp_rif_ipip_lb *
6554mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6555{
6556 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6557}
6558
6559static void
6560mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6561 const struct mlxsw_sp_rif_params *params)
6562{
6563 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6564 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6565
6566 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6567 common);
6568 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6569 rif_lb->lb_config = params_lb->lb_config;
6570}
6571
6572static int
6573mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6574 struct mlxsw_sp_vr *ul_vr, bool enable)
6575{
6576 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6577 struct mlxsw_sp_rif *rif = &lb_rif->common;
6578 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6579 char ritr_pl[MLXSW_REG_RITR_LEN];
6580 u32 saddr4;
6581
6582 switch (lb_cf.ul_protocol) {
6583 case MLXSW_SP_L3_PROTO_IPV4:
6584 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6585 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6586 rif->rif_index, rif->vr_id, rif->dev->mtu);
6587 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6588 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6589 ul_vr->id, saddr4, lb_cf.okey);
6590 break;
6591
6592 case MLXSW_SP_L3_PROTO_IPV6:
6593 return -EAFNOSUPPORT;
6594 }
6595
6596 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6597}
6598
6599static int
6600mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6601{
6602 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6603 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6604 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6605 struct mlxsw_sp_vr *ul_vr;
6606 int err;
6607
David Ahernf8fa9b42017-10-18 09:56:56 -07006608 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006609 if (IS_ERR(ul_vr))
6610 return PTR_ERR(ul_vr);
6611
6612 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6613 if (err)
6614 goto err_loopback_op;
6615
6616 lb_rif->ul_vr_id = ul_vr->id;
6617 ++ul_vr->rif_count;
6618 return 0;
6619
6620err_loopback_op:
6621 mlxsw_sp_vr_put(ul_vr);
6622 return err;
6623}
6624
6625static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6626{
6627 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6628 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6629 struct mlxsw_sp_vr *ul_vr;
6630
6631 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6632 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6633
6634 --ul_vr->rif_count;
6635 mlxsw_sp_vr_put(ul_vr);
6636}
6637
6638static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6639 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6640 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6641 .setup = mlxsw_sp_rif_ipip_lb_setup,
6642 .configure = mlxsw_sp_rif_ipip_lb_configure,
6643 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6644};
6645
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006646static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6647 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6648 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6649 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006650 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006651};
6652
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006653static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6654{
6655 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6656
6657 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6658 sizeof(struct mlxsw_sp_rif *),
6659 GFP_KERNEL);
6660 if (!mlxsw_sp->router->rifs)
6661 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006662
6663 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6664
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006665 return 0;
6666}
6667
6668static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6669{
6670 int i;
6671
6672 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6673 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6674
6675 kfree(mlxsw_sp->router->rifs);
6676}
6677
Petr Machatadcbda282017-10-20 09:16:16 +02006678static int
6679mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6680{
6681 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6682
6683 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6684 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6685}
6686
Petr Machata38ebc0f2017-09-02 23:49:17 +02006687static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6688{
6689 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006690 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006691 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006692}
6693
6694static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6695{
Petr Machata1012b9a2017-09-02 23:49:23 +02006696 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006697}
6698
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006699static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6700{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006701 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006702
6703 /* Flush pending FIB notifications and then flush the device's
6704 * table before requesting another dump. The FIB notification
6705 * block is unregistered, so no need to take RTNL.
6706 */
6707 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006708 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6709 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006710}
6711
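/* Multipath (ECMP) hash setup: seed the hardware hash with random bytes and
 * enable the relevant header fields. IPv4 hashes on addresses only, or also
 * on protocol and L4 ports when the fib_multipath_hash_policy sysctl asks
 * for L4 hashing; IPv6 hashes on addresses, flow label and next header.
 * Compiled out entirely when CONFIG_IP_ROUTE_MULTIPATH is disabled.
 */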
Ido Schimmelaf658b62017-11-02 17:14:09 +01006712#ifdef CONFIG_IP_ROUTE_MULTIPATH
6713static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6714{
6715 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6716}
6717
6718static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6719{
6720 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6721}
6722
6723static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6724{
6725 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6726
6727 mlxsw_sp_mp_hash_header_set(recr2_pl,
6728 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6729 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6730 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6731 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6732 if (only_l3)
6733 return;
6734 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6735 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6736 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6737 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6738}
6739
6740static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6741{
6742 mlxsw_sp_mp_hash_header_set(recr2_pl,
6743 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6744 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6745 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6746 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6747 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6748 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6749}
6750
6751static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6752{
6753 char recr2_pl[MLXSW_REG_RECR2_LEN];
6754 u32 seed;
6755
6756 get_random_bytes(&seed, sizeof(seed));
6757 mlxsw_reg_recr2_pack(recr2_pl, seed);
6758 mlxsw_sp_mp4_hash_init(recr2_pl);
6759 mlxsw_sp_mp6_hash_init(recr2_pl);
6760
6761 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6762}
6763#else
6764static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6765{
6766 return 0;
6767}
6768#endif
6769
Ido Schimmel4724ba562017-03-10 08:53:39 +01006770static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6771{
6772 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6773 u64 max_rifs;
6774 int err;
6775
6776 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6777 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006778 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006779
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006780 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006781 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6782 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6783 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006784 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006785 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006786}
6787
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

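/* Top-level router initialization. The sub-blocks are brought up in
 * dependency order - RIFs, IP-in-IP, nexthop tables, LPM trees, multicast
 * routing, virtual routers and neighbour handling - before the netevent
 * and FIB notifiers are registered, so that the first notifications
 * already find a fully initialized router. The error path unwinds in
 * exact reverse order.
 */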
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

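/* Tear everything down in the reverse order of mlxsw_sp_router_init(),
 * starting with the notifiers so that no further events are delivered
 * while the underlying data structures are being freed.
 */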
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}