/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

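/* Per-ASIC router state: the RIF and virtual router arrays, neighbour and
 * nexthop hash tables, LPM tree bookkeeping, the periodic neighbour and
 * nexthop update works, and the list of offloaded IP-in-IP tunnels.
 */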
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval; /* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

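/* Router interface (RIF): binds a netdevice and its FID to a virtual router,
 * and anchors the nexthops and neighbour entries that egress through it.
 */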
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

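/* Bind or unbind a flow counter to a RIF: read back the current RITR
 * configuration of the RIF and rewrite it with the counter settings.
 */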
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

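/* Bitmap of the prefix lengths (/0 through /128) in use by a FIB. It decides
 * how the bins of the LPM tree are structured.
 */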
struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

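/* Per-protocol FIB within a virtual router: a hash table and list of FIB
 * nodes, the LPM tree currently bound for this protocol, and reference
 * counts of the prefix lengths in use.
 */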
struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

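/* Program the tree layout into the device: the longest used prefix length
 * becomes the root bin, and each used prefix length is given the next
 * shorter one as its left child.
 */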
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

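/* Look up the virtual router bound to a kernel table ID, creating it on
 * first use.
 */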
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

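/* Rebind every virtual router that currently uses the FIB's old LPM tree to
 * the new tree, rolling the already converted VRs back on failure.
 */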
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

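/* Return the underlay netdevice that an IP-in-IP overlay device is bound to,
 * or NULL if the tunnel is not bound to a specific device.
 */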
static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

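/* Check whether a netdevice is a tunnel type that the driver can offload,
 * and optionally report which IP-in-IP type it maps to.
 */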
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap) {
		list_splice_init(&old_lb_rif->common.nexthop_list,
				 &new_lb_rif->common.nexthop_list);
		mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
	}

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

/* Update the offloads related to an IPIP entry. Decap is always updated;
 * in addition:
 * @recreate_loopback: recreates the associated loopback RIF
 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 */
Petr Machata65a61212017-11-03 10:03:37 +01001417int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1418 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001419 bool recreate_loopback,
1420 bool keep_encap,
1421 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001422 struct netlink_ext_ack *extack)
1423{
1424 int err;
1425
1426 /* RIFs can't be edited, so to update loopback, we need to destroy and
1427 * recreate it. That creates a window of opportunity where RALUE and
1428 * RATR registers end up referencing a RIF that's already gone. RATRs
1429 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001430 * of RALUE, demote the decap route back.
1431 */
1432 if (ipip_entry->decap_fib_entry)
1433 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1434
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001435 if (recreate_loopback) {
1436 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1437 keep_encap, extack);
1438 if (err)
1439 return err;
1440 } else if (update_nexthops) {
1441 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1442 &ipip_entry->ol_lb->common);
1443 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001444
Petr Machata65a61212017-11-03 10:03:37 +01001445 if (ipip_entry->ol_dev->flags & IFF_UP)
1446 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001447
1448 return 0;
1449}
1450
Petr Machata65a61212017-11-03 10:03:37 +01001451static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1452 struct net_device *ol_dev,
1453 struct netlink_ext_ack *extack)
1454{
1455 struct mlxsw_sp_ipip_entry *ipip_entry =
1456 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1457
1458 if (!ipip_entry)
1459 return 0;
1460 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001461 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001462}
1463
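/* The underlay device was enslaved to a different VRF: recreate the loopback
 * RIF so that it follows the new underlay table, while keeping the
 * encapsulation nexthops.
 */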
Petr Machata61481f22017-11-03 10:03:41 +01001464static int
1465mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1466 struct mlxsw_sp_ipip_entry *ipip_entry,
1467 struct net_device *ul_dev,
1468 struct netlink_ext_ack *extack)
1469{
1470 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1471 true, true, false, extack);
1472}
1473
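/* Stop offloading a tunnel altogether: roll back the effects of the overlay
 * device being up (demoting the decap route) and destroy the IPIP entry.
 */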
Petr Machataaf641712017-11-03 10:03:40 +01001474void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1475 struct mlxsw_sp_ipip_entry *ipip_entry)
1476{
1477 struct net_device *ol_dev = ipip_entry->ol_dev;
1478
1479 if (ol_dev->flags & IFF_UP)
1480 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1481 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1482}
1483
1484/* The configuration where several tunnels have the same local address in the
1485 * same underlay table needs special treatment in the HW. That is currently not
1486 * implemented in the driver. This function finds and demotes the first tunnel
1487 * with a given source address, except the one passed in via the argument
1488 * `except'.
1489 */
1490bool
1491mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1492 enum mlxsw_sp_l3proto ul_proto,
1493 union mlxsw_sp_l3addr saddr,
1494 u32 ul_tb_id,
1495 const struct mlxsw_sp_ipip_entry *except)
1496{
1497 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1498
1499 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1500 ipip_list_node) {
1501 if (ipip_entry != except &&
1502 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1503 ul_tb_id, ipip_entry)) {
1504 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1505 return true;
1506 }
1507 }
1508
1509 return false;
1510}
1511
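/* Demote all tunnels whose overlay device is stacked on top of the given
 * underlay device. Called when an event on the underlay could not be handled
 * for one of the tunnels that share it.
 */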
Petr Machata61481f22017-11-03 10:03:41 +01001512static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1513 struct net_device *ul_dev)
1514{
1515 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1516
1517 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1518 ipip_list_node) {
1519 struct net_device *ipip_ul_dev =
1520 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1521
1522 if (ipip_ul_dev == ul_dev)
1523 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1524 }
1525}
1526
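/* Dispatch a netdevice event that concerns the overlay device of an IPIP
 * tunnel: registration, unregistration, link up/down and enslavement to a
 * VRF device.
 */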
Petr Machata7e75af62017-11-03 10:03:36 +01001527int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1528 struct net_device *ol_dev,
1529 unsigned long event,
1530 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001531{
Petr Machata7e75af62017-11-03 10:03:36 +01001532 struct netdev_notifier_changeupper_info *chup;
1533 struct netlink_ext_ack *extack;
1534
Petr Machata00635872017-10-16 16:26:37 +02001535 switch (event) {
1536 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001537 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001538 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001539 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001540 return 0;
1541 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001542 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1543 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001544 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001545 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001546 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001547 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001548 chup = container_of(info, typeof(*chup), info);
1549 extack = info->extack;
1550 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001551 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001552 ol_dev,
1553 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001554 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001555 }
1556 return 0;
1557}
1558
Petr Machata61481f22017-11-03 10:03:41 +01001559static int
1560__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1561 struct mlxsw_sp_ipip_entry *ipip_entry,
1562 struct net_device *ul_dev,
1563 unsigned long event,
1564 struct netdev_notifier_info *info)
1565{
1566 struct netdev_notifier_changeupper_info *chup;
1567 struct netlink_ext_ack *extack;
1568
1569 switch (event) {
1570 case NETDEV_CHANGEUPPER:
1571 chup = container_of(info, typeof(*chup), info);
1572 extack = info->extack;
1573 if (netif_is_l3_master(chup->upper_dev))
1574 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1575 ipip_entry,
1576 ul_dev,
1577 extack);
1578 break;
1579 }
1580 return 0;
1581}
1582
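/* Dispatch a netdevice event that concerns an underlay device. A single
 * underlay may carry several tunnels, so the event is applied to every
 * matching IPIP entry; if handling fails, all tunnels on that underlay are
 * demoted.
 */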
1583int
1584mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1585 struct net_device *ul_dev,
1586 unsigned long event,
1587 struct netdev_notifier_info *info)
1588{
1589 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1590 int err;
1591
1592 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1593 ul_dev,
1594 ipip_entry))) {
1595 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1596 ul_dev, event, info);
1597 if (err) {
1598 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1599 ul_dev);
1600 return err;
1601 }
1602 }
1603
1604 return 0;
1605}
1606
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001607struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001608 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001609};
1610
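/* A neighbour as reflected to the device: mirrors a kernel neighbour, is
 * associated with a RIF, and keeps track of the nexthops resolved through it
 * as well as an optional activity counter.
 */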
1611struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001612 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001613 struct rhash_head ht_node;
1614 struct mlxsw_sp_neigh_key key;
1615 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001616 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001617 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001618 struct list_head nexthop_list; /* list of nexthops using
1619 * this neigh entry
1620 */
Yotam Gigib2157142016-07-05 11:27:51 +02001621 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001622 unsigned int counter_index;
1623 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001624};
1625
1626static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1627 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1628 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1629 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1630};
1631
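/* Iterate over the neighbour entries associated with a RIF: pass NULL to get
 * the first entry, or a previous entry to get the next one; NULL is returned
 * past the last entry.
 */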
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001632struct mlxsw_sp_neigh_entry *
1633mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1634 struct mlxsw_sp_neigh_entry *neigh_entry)
1635{
1636 if (!neigh_entry) {
1637 if (list_empty(&rif->neigh_list))
1638 return NULL;
1639 else
1640 return list_first_entry(&rif->neigh_list,
1641 typeof(*neigh_entry),
1642 rif_list_node);
1643 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001644 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001645 return NULL;
1646 return list_next_entry(neigh_entry, rif_list_node);
1647}
1648
1649int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1650{
1651 return neigh_entry->key.n->tbl->family;
1652}
1653
1654unsigned char *
1655mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1656{
1657 return neigh_entry->ha;
1658}
1659
1660u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1661{
1662 struct neighbour *n;
1663
1664 n = neigh_entry->key.n;
1665 return ntohl(*((__be32 *) n->primary_key));
1666}
1667
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001668struct in6_addr *
1669mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1670{
1671 struct neighbour *n;
1672
1673 n = neigh_entry->key.n;
1674 return (struct in6_addr *) &n->primary_key;
1675}
1676
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001677int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1678 struct mlxsw_sp_neigh_entry *neigh_entry,
1679 u64 *p_counter)
1680{
1681 if (!neigh_entry->counter_valid)
1682 return -EINVAL;
1683
1684 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1685 p_counter, NULL);
1686}
1687
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001688static struct mlxsw_sp_neigh_entry *
1689mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1690 u16 rif)
1691{
1692 struct mlxsw_sp_neigh_entry *neigh_entry;
1693
1694 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1695 if (!neigh_entry)
1696 return NULL;
1697
1698 neigh_entry->key.n = n;
1699 neigh_entry->rif = rif;
1700 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1701
1702 return neigh_entry;
1703}
1704
1705static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1706{
1707 kfree(neigh_entry);
1708}
1709
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001710static int
1711mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1712 struct mlxsw_sp_neigh_entry *neigh_entry)
1713{
Ido Schimmel9011b672017-05-16 19:38:25 +02001714 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001715 &neigh_entry->ht_node,
1716 mlxsw_sp_neigh_ht_params);
1717}
1718
1719static void
1720mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1721 struct mlxsw_sp_neigh_entry *neigh_entry)
1722{
Ido Schimmel9011b672017-05-16 19:38:25 +02001723 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001724 &neigh_entry->ht_node,
1725 mlxsw_sp_neigh_ht_params);
1726}
1727
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001728static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001729mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1730 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001731{
1732 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001733 const char *table_name;
1734
1735 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1736 case AF_INET:
1737 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1738 break;
1739 case AF_INET6:
1740 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1741 break;
1742 default:
1743 WARN_ON(1);
1744 return false;
1745 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001746
1747 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001748 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001749}
1750
1751static void
1752mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1753 struct mlxsw_sp_neigh_entry *neigh_entry)
1754{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001755 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001756 return;
1757
1758 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1759 return;
1760
1761 neigh_entry->counter_valid = true;
1762}
1763
1764static void
1765mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1766 struct mlxsw_sp_neigh_entry *neigh_entry)
1767{
1768 if (!neigh_entry->counter_valid)
1769 return;
1770 mlxsw_sp_flow_counter_free(mlxsw_sp,
1771 neigh_entry->counter_index);
1772 neigh_entry->counter_valid = false;
1773}
1774
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001775static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001776mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001777{
1778 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001779 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001780 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001781
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001782 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1783 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001784 return ERR_PTR(-EINVAL);
1785
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001786 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001787 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001788 return ERR_PTR(-ENOMEM);
1789
1790 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1791 if (err)
1792 goto err_neigh_entry_insert;
1793
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001794 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001795 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001796
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001797 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001798
1799err_neigh_entry_insert:
1800 mlxsw_sp_neigh_entry_free(neigh_entry);
1801 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001802}
1803
1804static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001805mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1806 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001807{
Ido Schimmel9665b742017-02-08 11:16:42 +01001808 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001809 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001810 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1811 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001812}
1813
1814static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001815mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001816{
Jiri Pirko33b13412016-11-10 12:31:04 +01001817 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001818
Jiri Pirko33b13412016-11-10 12:31:04 +01001819 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001820 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001821 &key, mlxsw_sp_neigh_ht_params);
1822}
1823
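/* The neighbour activity polling interval follows the shorter of the IPv4
 * and IPv6 DELAY_PROBE_TIME values, or just the IPv4 one when IPv6 is
 * disabled.
 */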
Yotam Gigic723c7352016-07-05 11:27:43 +02001824static void
1825mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1826{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001827 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001828
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001829#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001830 interval = min_t(unsigned long,
1831 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1832 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001833#else
1834 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1835#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001836 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001837}
1838
1839static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1840 char *rauhtd_pl,
1841 int ent_index)
1842{
1843 struct net_device *dev;
1844 struct neighbour *n;
1845 __be32 dipn;
1846 u32 dip;
1847 u16 rif;
1848
1849 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1850
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001851 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001852 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1853 return;
1854 }
1855
1856 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001857 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001858 n = neigh_lookup(&arp_tbl, &dipn, dev);
1859 if (!n) {
1860 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1861 &dip);
1862 return;
1863 }
1864
1865 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1866 neigh_event_send(n, NULL);
1867 neigh_release(n);
1868}
1869
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001870#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001871static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1872 char *rauhtd_pl,
1873 int rec_index)
1874{
1875 struct net_device *dev;
1876 struct neighbour *n;
1877 struct in6_addr dip;
1878 u16 rif;
1879
1880 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1881 (char *) &dip);
1882
1883 if (!mlxsw_sp->router->rifs[rif]) {
1884 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1885 return;
1886 }
1887
1888 dev = mlxsw_sp->router->rifs[rif]->dev;
1889 n = neigh_lookup(&nd_tbl, &dip, dev);
1890 if (!n) {
1891 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1892 &dip);
1893 return;
1894 }
1895
1896 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1897 neigh_event_send(n, NULL);
1898 neigh_release(n);
1899}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001900#else
1901static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1902 char *rauhtd_pl,
1903 int rec_index)
1904{
1905}
1906#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001907
Yotam Gigic723c7352016-07-05 11:27:43 +02001908static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1909 char *rauhtd_pl,
1910 int rec_index)
1911{
1912 u8 num_entries;
1913 int i;
1914
1915 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1916 rec_index);
1917 /* Hardware starts counting at 0, so add 1. */
1918 num_entries++;
1919
1920 /* Each record consists of several neighbour entries. */
1921 for (i = 0; i < num_entries; i++) {
1922 int ent_index;
1923
1924 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1925 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1926 ent_index);
1927 }
1929}
1930
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001931static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1932 char *rauhtd_pl,
1933 int rec_index)
1934{
1935 /* One record contains one entry. */
1936 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1937 rec_index);
1938}
1939
Yotam Gigic723c7352016-07-05 11:27:43 +02001940static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1941 char *rauhtd_pl, int rec_index)
1942{
1943 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1944 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1945 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1946 rec_index);
1947 break;
1948 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001949 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1950 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001951 break;
1952 }
1953}
1954
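/* A dump is full only if the maximum number of records was returned and the
 * last record itself is full: an IPv6 record always holds a single entry, an
 * IPv4 record is full when all of its entry slots are used. A full dump
 * means another RAUHTD query is needed.
 */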
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001955static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1956{
1957 u8 num_rec, last_rec_index, num_entries;
1958
1959 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1960 last_rec_index = num_rec - 1;
1961
1962 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1963 return false;
1964 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1965 MLXSW_REG_RAUHTD_TYPE_IPV6)
1966 return true;
1967
1968 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1969 last_rec_index);
1970 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1971 return true;
1972 return false;
1973}
1974
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001975static int
1976__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1977 char *rauhtd_pl,
1978 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001979{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001980 int i, num_rec;
1981 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001982
1983 /* Make sure the neighbour's netdev isn't removed in the
1984 * process.
1985 */
1986 rtnl_lock();
1987 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001988 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001989 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1990 rauhtd_pl);
1991 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001992 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001993 break;
1994 }
1995 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1996 for (i = 0; i < num_rec; i++)
1997 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1998 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001999 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002000 rtnl_unlock();
2001
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002002 return err;
2003}
2004
2005static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2006{
2007 enum mlxsw_reg_rauhtd_type type;
2008 char *rauhtd_pl;
2009 int err;
2010
2011 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2012 if (!rauhtd_pl)
2013 return -ENOMEM;
2014
2015 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2016 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2017 if (err)
2018 goto out;
2019
2020 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2021 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2022out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002023 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002024 return err;
2025}
2026
2027static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2028{
2029 struct mlxsw_sp_neigh_entry *neigh_entry;
2030
2031 	/* Take the RTNL mutex here to prevent the lists from changing. */
2032 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002033 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002034 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002035 		/* If this neigh has nexthops, make the kernel think this neigh
2036 * is active regardless of the traffic.
2037 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002038 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002039 rtnl_unlock();
2040}
2041
2042static void
2043mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2044{
Ido Schimmel9011b672017-05-16 19:38:25 +02002045 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002046
Ido Schimmel9011b672017-05-16 19:38:25 +02002047 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002048 msecs_to_jiffies(interval));
2049}
2050
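/* Periodic work that dumps neighbour activity from the device, nudges the
 * corresponding kernel neighbours, keeps nexthop neighbours alive and then
 * reschedules itself.
 */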
2051static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2052{
Ido Schimmel9011b672017-05-16 19:38:25 +02002053 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002054 int err;
2055
Ido Schimmel9011b672017-05-16 19:38:25 +02002056 router = container_of(work, struct mlxsw_sp_router,
2057 neighs_update.dw.work);
2058 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002059 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002060 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002061
Ido Schimmel9011b672017-05-16 19:38:25 +02002062 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002063
Ido Schimmel9011b672017-05-16 19:38:25 +02002064 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002065}
2066
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002067static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2068{
2069 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002070 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002071
Ido Schimmel9011b672017-05-16 19:38:25 +02002072 router = container_of(work, struct mlxsw_sp_router,
2073 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002074 	/* Iterate over the nexthop neighbours and find those that are
2075 	 * unresolved, then send ARP for them. This solves the chicken-and-egg
2076 	 * problem where a nexthop would not get offloaded until its neighbour
2077 	 * is resolved, but the neighbour would never get resolved as long as
2078 	 * traffic is flowing in HW via a different nexthop.
2079 	 *
2080 	 * Take the RTNL mutex here to prevent the lists from changing.
2081 */
2082 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002083 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002084 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002085 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002086 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002087 rtnl_unlock();
2088
Ido Schimmel9011b672017-05-16 19:38:25 +02002089 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002090 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2091}
2092
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002093static void
2094mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2095 struct mlxsw_sp_neigh_entry *neigh_entry,
2096 bool removing);
2097
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002098static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002099{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002100 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2101 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2102}
2103
2104static void
2105mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2106 struct mlxsw_sp_neigh_entry *neigh_entry,
2107 enum mlxsw_reg_rauht_op op)
2108{
Jiri Pirko33b13412016-11-10 12:31:04 +01002109 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002110 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002111 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002112
2113 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2114 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002115 if (neigh_entry->counter_valid)
2116 mlxsw_reg_rauht_pack_counter(rauht_pl,
2117 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002118 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2119}
2120
2121static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002122mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2123 struct mlxsw_sp_neigh_entry *neigh_entry,
2124 enum mlxsw_reg_rauht_op op)
2125{
2126 struct neighbour *n = neigh_entry->key.n;
2127 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2128 const char *dip = n->primary_key;
2129
2130 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2131 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002132 if (neigh_entry->counter_valid)
2133 mlxsw_reg_rauht_pack_counter(rauht_pl,
2134 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002135 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2136}
2137
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002138bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002139{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002140 struct neighbour *n = neigh_entry->key.n;
2141
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002142 /* Packets with a link-local destination address are trapped
2143 * after LPM lookup and never reach the neighbour table, so
2144 * there is no need to program such neighbours to the device.
2145 */
2146 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2147 IPV6_ADDR_LINKLOCAL)
2148 return true;
2149 return false;
2150}
2151
2152static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002153mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2154 struct mlxsw_sp_neigh_entry *neigh_entry,
2155 bool adding)
2156{
2157 if (!adding && !neigh_entry->connected)
2158 return;
2159 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002160 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002161 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2162 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002163 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002164 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002165 return;
2166 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2167 mlxsw_sp_rauht_op(adding));
2168 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002169 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002170 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002171}
2172
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002173void
2174mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2175 struct mlxsw_sp_neigh_entry *neigh_entry,
2176 bool adding)
2177{
2178 if (adding)
2179 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2180 else
2181 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2182 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2183}
2184
Ido Schimmelceb88812017-11-02 17:14:07 +01002185struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002186 struct work_struct work;
2187 struct mlxsw_sp *mlxsw_sp;
2188 struct neighbour *n;
2189};
2190
2191static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2192{
Ido Schimmelceb88812017-11-02 17:14:07 +01002193 struct mlxsw_sp_netevent_work *net_work =
2194 container_of(work, struct mlxsw_sp_netevent_work, work);
2195 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002196 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002197 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002198 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002199 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002200 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002201
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002202 /* If these parameters are changed after we release the lock,
2203 * then we are guaranteed to receive another event letting us
2204 * know about it.
2205 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002206 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002207 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002208 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002209 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002210 read_unlock_bh(&n->lock);
2211
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002212 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002213 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002214 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2215 if (!entry_connected && !neigh_entry)
2216 goto out;
2217 if (!neigh_entry) {
2218 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2219 if (IS_ERR(neigh_entry))
2220 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002221 }
2222
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002223 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2224 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2225 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2226
2227 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2228 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2229
2230out:
2231 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002232 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002233 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002234}
2235
Ido Schimmel28678f02017-11-02 17:14:10 +01002236static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2237
2238static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2239{
2240 struct mlxsw_sp_netevent_work *net_work =
2241 container_of(work, struct mlxsw_sp_netevent_work, work);
2242 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2243
2244 mlxsw_sp_mp_hash_init(mlxsw_sp);
2245 kfree(net_work);
2246}
2247
2248static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002249 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002250{
Ido Schimmelceb88812017-11-02 17:14:07 +01002251 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002252 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002253 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002254 struct mlxsw_sp *mlxsw_sp;
2255 unsigned long interval;
2256 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002257 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002258 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002259
2260 switch (event) {
2261 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2262 p = ptr;
2263
2264 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002265 if (!p->dev || (p->tbl->family != AF_INET &&
2266 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002267 return NOTIFY_DONE;
2268
2269 /* We are in atomic context and can't take RTNL mutex,
2270 * so use RCU variant to walk the device chain.
2271 */
2272 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2273 if (!mlxsw_sp_port)
2274 return NOTIFY_DONE;
2275
2276 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2277 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002278 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002279
2280 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2281 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002282 case NETEVENT_NEIGH_UPDATE:
2283 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002284
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002285 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002286 return NOTIFY_DONE;
2287
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002288 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002289 if (!mlxsw_sp_port)
2290 return NOTIFY_DONE;
2291
Ido Schimmelceb88812017-11-02 17:14:07 +01002292 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2293 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002294 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002295 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002296 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002297
Ido Schimmelceb88812017-11-02 17:14:07 +01002298 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2299 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2300 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002301
2302 /* Take a reference to ensure the neighbour won't be
2303 		 * destroyed until we drop the reference in the
2304 		 * work item.
2305 */
2306 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002307 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002308 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002309 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002310 case NETEVENT_MULTIPATH_HASH_UPDATE:
2311 net = ptr;
2312
2313 if (!net_eq(net, &init_net))
2314 return NOTIFY_DONE;
2315
2316 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2317 if (!net_work)
2318 return NOTIFY_BAD;
2319
2320 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2321 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2322 net_work->mlxsw_sp = router->mlxsw_sp;
2323 mlxsw_core_schedule_work(&net_work->work);
2324 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002325 }
2326
2327 return NOTIFY_DONE;
2328}
2329
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002330static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2331{
Yotam Gigic723c7352016-07-05 11:27:43 +02002332 int err;
2333
Ido Schimmel9011b672017-05-16 19:38:25 +02002334 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002335 &mlxsw_sp_neigh_ht_params);
2336 if (err)
2337 return err;
2338
2339 /* Initialize the polling interval according to the default
2340 * table.
2341 */
2342 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2343
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002344 	/* Create the delayed works for neighbour activity update and probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002345 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002346 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002347 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002348 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002349 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2350 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002351 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002352}
2353
2354static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2355{
Ido Schimmel9011b672017-05-16 19:38:25 +02002356 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2357 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2358 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002359}
2360
Ido Schimmel9665b742017-02-08 11:16:42 +01002361static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002362 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002363{
2364 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2365
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002366 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002367 rif_list_node) {
2368 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002369 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002370 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002371}
2372
Petr Machata35225e42017-09-02 23:49:22 +02002373enum mlxsw_sp_nexthop_type {
2374 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002375 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002376};
2377
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002378struct mlxsw_sp_nexthop_key {
2379 struct fib_nh *fib_nh;
2380};
2381
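/* A single nexthop of a nexthop group. Depending on its type it is resolved
 * either through a neighbour entry (Ethernet) or through an IPIP entry
 * (tunnel), and when offloaded it occupies num_adj_entries consecutive
 * adjacency entries of the group.
 */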
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002382struct mlxsw_sp_nexthop {
2383 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002384 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002385 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002386 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2387 * this belongs to
2388 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002389 struct rhash_head ht_node;
2390 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002391 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002392 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002393 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002394 int norm_nh_weight;
2395 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002396 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002397 	u8 should_offload:1, /* set indicates this neigh is connected and
2398 				* should be put in the KVD linear area of this group.
2399 				*/
2400 	   offloaded:1, /* set in case the neigh is actually put into
2401 			 * the KVD linear area of this group.
2402 			 */
2403 	   update:1; /* set indicates that the MAC of this neigh should be
2404 		      * updated in HW
2405 		      */
Petr Machata35225e42017-09-02 23:49:22 +02002406 enum mlxsw_sp_nexthop_type type;
2407 union {
2408 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002409 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002410 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002411 unsigned int counter_index;
2412 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002413};
2414
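/* A group of nexthops shared by all FIB entries that resolve to the same set
 * of nexthops. When adj_index_valid is set, the group owns a block of
 * ecmp_size adjacency entries starting at adj_index.
 */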
2415struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002416 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002417 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002418 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002419 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002420 u8 adj_index_valid:1,
2421 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002422 u32 adj_index;
2423 u16 ecmp_size;
2424 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002425 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002426 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002427#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002428};
2429
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002430void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2431 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002432{
2433 struct devlink *devlink;
2434
2435 devlink = priv_to_devlink(mlxsw_sp->core);
2436 if (!devlink_dpipe_table_counter_enabled(devlink,
2437 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2438 return;
2439
2440 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2441 return;
2442
2443 nh->counter_valid = true;
2444}
2445
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002446void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2447 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002448{
2449 if (!nh->counter_valid)
2450 return;
2451 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2452 nh->counter_valid = false;
2453}
2454
2455int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2456 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2457{
2458 if (!nh->counter_valid)
2459 return -EINVAL;
2460
2461 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2462 p_counter, NULL);
2463}
2464
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002465struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2466 struct mlxsw_sp_nexthop *nh)
2467{
2468 if (!nh) {
2469 if (list_empty(&router->nexthop_list))
2470 return NULL;
2471 else
2472 return list_first_entry(&router->nexthop_list,
2473 typeof(*nh), router_list_node);
2474 }
2475 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2476 return NULL;
2477 return list_next_entry(nh, router_list_node);
2478}
2479
2480bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2481{
2482 return nh->offloaded;
2483}
2484
2485unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2486{
2487 if (!nh->offloaded)
2488 return NULL;
2489 return nh->neigh_entry->ha;
2490}
2491
2492int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002493 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002494{
2495 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2496 u32 adj_hash_index = 0;
2497 int i;
2498
2499 if (!nh->offloaded || !nh_grp->adj_index_valid)
2500 return -EINVAL;
2501
2502 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002503 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002504
2505 for (i = 0; i < nh_grp->count; i++) {
2506 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2507
2508 if (nh_iter == nh)
2509 break;
2510 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002511 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002512 }
2513
2514 *p_adj_hash_index = adj_hash_index;
2515 return 0;
2516}
2517
2518struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2519{
2520 return nh->rif;
2521}
2522
2523bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2524{
2525 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2526 int i;
2527
2528 for (i = 0; i < nh_grp->count; i++) {
2529 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2530
2531 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2532 return true;
2533 }
2534 return false;
2535}
2536
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002537static struct fib_info *
2538mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2539{
2540 return nh_grp->priv;
2541}
2542
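/* Lookup key for the nexthop group rhashtable: IPv4 groups are keyed by
 * their fib_info, IPv6 groups by the gateways and interfaces of the fib6
 * entry's routes.
 */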
2543struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002544 enum mlxsw_sp_l3proto proto;
2545 union {
2546 struct fib_info *fi;
2547 struct mlxsw_sp_fib6_entry *fib6_entry;
2548 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002549};
2550
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002551static bool
2552mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2553 const struct in6_addr *gw, int ifindex)
2554{
2555 int i;
2556
2557 for (i = 0; i < nh_grp->count; i++) {
2558 const struct mlxsw_sp_nexthop *nh;
2559
2560 nh = &nh_grp->nexthops[i];
2561 if (nh->ifindex == ifindex &&
2562 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2563 return true;
2564 }
2565
2566 return false;
2567}
2568
2569static bool
2570mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2571 const struct mlxsw_sp_fib6_entry *fib6_entry)
2572{
2573 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2574
2575 if (nh_grp->count != fib6_entry->nrt6)
2576 return false;
2577
2578 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2579 struct in6_addr *gw;
2580 int ifindex;
2581
2582 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2583 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2584 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2585 return false;
2586 }
2587
2588 return true;
2589}
2590
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002591static int
2592mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2593{
2594 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2595 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2596
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002597 switch (cmp_arg->proto) {
2598 case MLXSW_SP_L3_PROTO_IPV4:
2599 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2600 case MLXSW_SP_L3_PROTO_IPV6:
2601 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2602 cmp_arg->fib6_entry);
2603 default:
2604 WARN_ON(1);
2605 return 1;
2606 }
2607}
2608
2609static int
2610mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2611{
2612 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002613}
2614
2615static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2616{
2617 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002618 const struct mlxsw_sp_nexthop *nh;
2619 struct fib_info *fi;
2620 unsigned int val;
2621 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002622
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002623 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2624 case AF_INET:
2625 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2626 return jhash(&fi, sizeof(fi), seed);
2627 case AF_INET6:
2628 val = nh_grp->count;
2629 for (i = 0; i < nh_grp->count; i++) {
2630 nh = &nh_grp->nexthops[i];
2631 val ^= nh->ifindex;
2632 }
2633 return jhash(&val, sizeof(val), seed);
2634 default:
2635 WARN_ON(1);
2636 return 0;
2637 }
2638}
2639
2640static u32
2641mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2642{
2643 unsigned int val = fib6_entry->nrt6;
2644 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2645 struct net_device *dev;
2646
2647 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2648 dev = mlxsw_sp_rt6->rt->dst.dev;
2649 val ^= dev->ifindex;
2650 }
2651
2652 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002653}
2654
2655static u32
2656mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2657{
2658 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2659
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002660 switch (cmp_arg->proto) {
2661 case MLXSW_SP_L3_PROTO_IPV4:
2662 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2663 case MLXSW_SP_L3_PROTO_IPV6:
2664 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2665 default:
2666 WARN_ON(1);
2667 return 0;
2668 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002669}
2670
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002671static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002672 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002673 .hashfn = mlxsw_sp_nexthop_group_hash,
2674 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2675 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002676};
2677
2678static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2679 struct mlxsw_sp_nexthop_group *nh_grp)
2680{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002681 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2682 !nh_grp->gateway)
2683 return 0;
2684
Ido Schimmel9011b672017-05-16 19:38:25 +02002685 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002686 &nh_grp->ht_node,
2687 mlxsw_sp_nexthop_group_ht_params);
2688}
2689
2690static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2691 struct mlxsw_sp_nexthop_group *nh_grp)
2692{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002693 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2694 !nh_grp->gateway)
2695 return;
2696
Ido Schimmel9011b672017-05-16 19:38:25 +02002697 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002698 &nh_grp->ht_node,
2699 mlxsw_sp_nexthop_group_ht_params);
2700}
2701
2702static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002703mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2704 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002705{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002706 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2707
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002708 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002709 cmp_arg.fi = fi;
2710 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2711 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002712 mlxsw_sp_nexthop_group_ht_params);
2713}
2714
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002715static struct mlxsw_sp_nexthop_group *
2716mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2717 struct mlxsw_sp_fib6_entry *fib6_entry)
2718{
2719 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2720
2721 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2722 cmp_arg.fib6_entry = fib6_entry;
2723 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2724 &cmp_arg,
2725 mlxsw_sp_nexthop_group_ht_params);
2726}
2727
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002728static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2729 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2730 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2731 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2732};
2733
2734static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2735 struct mlxsw_sp_nexthop *nh)
2736{
Ido Schimmel9011b672017-05-16 19:38:25 +02002737 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002738 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2739}
2740
2741static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2742 struct mlxsw_sp_nexthop *nh)
2743{
Ido Schimmel9011b672017-05-16 19:38:25 +02002744 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002745 mlxsw_sp_nexthop_ht_params);
2746}
2747
Ido Schimmelad178c82017-02-08 11:16:40 +01002748static struct mlxsw_sp_nexthop *
2749mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2750 struct mlxsw_sp_nexthop_key key)
2751{
Ido Schimmel9011b672017-05-16 19:38:25 +02002752 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002753 mlxsw_sp_nexthop_ht_params);
2754}
2755
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002756static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002757 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002758 u32 adj_index, u16 ecmp_size,
2759 u32 new_adj_index,
2760 u16 new_ecmp_size)
2761{
2762 char raleu_pl[MLXSW_REG_RALEU_LEN];
2763
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002764 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002765 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2766 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002767 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002768 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2769}
2770
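/* After a nexthop group was assigned a new adjacency index, rewrite the
 * routes that use the group so that they point at the new index and ECMP
 * size rather than the old ones (done via the RALEU register, per FIB).
 */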
2771static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2772 struct mlxsw_sp_nexthop_group *nh_grp,
2773 u32 old_adj_index, u16 old_ecmp_size)
2774{
2775 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002776 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002777 int err;
2778
2779 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002780 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002781 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002782 fib = fib_entry->fib_node->fib;
2783 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002784 old_adj_index,
2785 old_ecmp_size,
2786 nh_grp->adj_index,
2787 nh_grp->ecmp_size);
2788 if (err)
2789 return err;
2790 }
2791 return 0;
2792}
2793
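/* Program one adjacency entry for an Ethernet nexthop: the RATR register is
 * written with the neighbour's MAC and egress RIF, and the nexthop's flow
 * counter is attached when one was allocated.
 */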
Ido Schimmeleb789982017-10-22 23:11:48 +02002794static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2795 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002796{
2797 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2798 char ratr_pl[MLXSW_REG_RATR_LEN];
2799
2800 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002801 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2802 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002803 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002804 if (nh->counter_valid)
2805 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2806 else
2807 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2808
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002809 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2810}
2811
Ido Schimmeleb789982017-10-22 23:11:48 +02002812int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2813 struct mlxsw_sp_nexthop *nh)
2814{
2815 int i;
2816
2817 for (i = 0; i < nh->num_adj_entries; i++) {
2818 int err;
2819
2820 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2821 if (err)
2822 return err;
2823 }
2824
2825 return 0;
2826}
2827
2828static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2829 u32 adj_index,
2830 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002831{
2832 const struct mlxsw_sp_ipip_ops *ipip_ops;
2833
2834 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2835 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2836}
2837
Ido Schimmeleb789982017-10-22 23:11:48 +02002838static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2839 u32 adj_index,
2840 struct mlxsw_sp_nexthop *nh)
2841{
2842 int i;
2843
2844 for (i = 0; i < nh->num_adj_entries; i++) {
2845 int err;
2846
2847 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2848 nh);
2849 if (err)
2850 return err;
2851 }
2852
2853 return 0;
2854}
2855
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002856static int
Petr Machata35225e42017-09-02 23:49:22 +02002857mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2858 struct mlxsw_sp_nexthop_group *nh_grp,
2859 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002860{
2861 u32 adj_index = nh_grp->adj_index; /* base */
2862 struct mlxsw_sp_nexthop *nh;
2863 int i;
2864 int err;
2865
2866 for (i = 0; i < nh_grp->count; i++) {
2867 nh = &nh_grp->nexthops[i];
2868
2869 if (!nh->should_offload) {
2870 nh->offloaded = 0;
2871 continue;
2872 }
2873
Ido Schimmela59b7e02017-01-23 11:11:42 +01002874 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002875 switch (nh->type) {
2876 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002877 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002878 (mlxsw_sp, adj_index, nh);
2879 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002880 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2881 err = mlxsw_sp_nexthop_ipip_update
2882 (mlxsw_sp, adj_index, nh);
2883 break;
Petr Machata35225e42017-09-02 23:49:22 +02002884 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002885 if (err)
2886 return err;
2887 nh->update = 0;
2888 nh->offloaded = 1;
2889 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002890 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002891 }
2892 return 0;
2893}
2894
Ido Schimmel1819ae32017-07-21 18:04:28 +02002895static bool
2896mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2897 const struct mlxsw_sp_fib_entry *fib_entry);
2898
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002899static int
2900mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2901 struct mlxsw_sp_nexthop_group *nh_grp)
2902{
2903 struct mlxsw_sp_fib_entry *fib_entry;
2904 int err;
2905
2906 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002907 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2908 fib_entry))
2909 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002910 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2911 if (err)
2912 return err;
2913 }
2914 return 0;
2915}
2916
2917static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002918mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2919 enum mlxsw_reg_ralue_op op, int err);
2920
2921static void
2922mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2923{
2924 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2925 struct mlxsw_sp_fib_entry *fib_entry;
2926
2927 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2928 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2929 fib_entry))
2930 continue;
2931 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2932 }
2933}
2934
Ido Schimmel425a08c2017-10-22 23:11:47 +02002935static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2936{
2937 /* Valid sizes for an adjacency group are:
2938 * 1-64, 512, 1024, 2048 and 4096.
2939 */
2940 if (*p_adj_grp_size <= 64)
2941 return;
2942 else if (*p_adj_grp_size <= 512)
2943 *p_adj_grp_size = 512;
2944 else if (*p_adj_grp_size <= 1024)
2945 *p_adj_grp_size = 1024;
2946 else if (*p_adj_grp_size <= 2048)
2947 *p_adj_grp_size = 2048;
2948 else
2949 *p_adj_grp_size = 4096;
2950}
2951
2952static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2953 unsigned int alloc_size)
2954{
2955 if (alloc_size >= 4096)
2956 *p_adj_grp_size = 4096;
2957 else if (alloc_size >= 2048)
2958 *p_adj_grp_size = 2048;
2959 else if (alloc_size >= 1024)
2960 *p_adj_grp_size = 1024;
2961 else if (alloc_size >= 512)
2962 *p_adj_grp_size = 512;
2963}
2964
2965static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2966 u16 *p_adj_grp_size)
2967{
2968 unsigned int alloc_size;
2969 int err;
2970
2971 /* Round up the requested group size to the next size supported
2972 * by the device and make sure the request can be satisfied.
2973 */
2974 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2975 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2976 &alloc_size);
2977 if (err)
2978 return err;
2979 /* It is possible the allocation results in more allocated
2980	 * entries than requested. Try to use as many of them as
2981 * possible.
2982 */
2983 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2984
2985 return 0;
2986}
2987
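/* Editor's illustration: a minimal userspace sketch of the adjacency-group
 * sizing rules above.  It reimplements the round-up rule locally and does not
 * call the driver or model the KVD allocator feedback used by the round-down
 * step; the requested sizes below are made-up examples.
 */
#include <stdio.h>

static unsigned int adj_grp_size_round_up(unsigned int size)
{
	if (size <= 64)
		return size;
	if (size <= 512)
		return 512;
	if (size <= 1024)
		return 1024;
	if (size <= 2048)
		return 2048;
	return 4096;
}

int main(void)
{
	const unsigned int requested[] = { 3, 64, 65, 600, 4000 };
	unsigned int i;

	for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++)
		printf("requested %u -> adjacency group size %u\n",
		       requested[i], adj_grp_size_round_up(requested[i]));
	return 0;
}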
Ido Schimmel77d964e2017-08-02 09:56:05 +02002988static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002989mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2990{
2991 int i, g = 0, sum_norm_weight = 0;
2992 struct mlxsw_sp_nexthop *nh;
2993
2994 for (i = 0; i < nh_grp->count; i++) {
2995 nh = &nh_grp->nexthops[i];
2996
2997 if (!nh->should_offload)
2998 continue;
2999 if (g > 0)
3000 g = gcd(nh->nh_weight, g);
3001 else
3002 g = nh->nh_weight;
3003 }
3004
3005 for (i = 0; i < nh_grp->count; i++) {
3006 nh = &nh_grp->nexthops[i];
3007
3008 if (!nh->should_offload)
3009 continue;
3010 nh->norm_nh_weight = nh->nh_weight / g;
3011 sum_norm_weight += nh->norm_nh_weight;
3012 }
3013
3014 nh_grp->sum_norm_weight = sum_norm_weight;
3015}
3016
3017static void
3018mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3019{
3020 int total = nh_grp->sum_norm_weight;
3021 u16 ecmp_size = nh_grp->ecmp_size;
3022 int i, weight = 0, lower_bound = 0;
3023
3024 for (i = 0; i < nh_grp->count; i++) {
3025 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3026 int upper_bound;
3027
3028 if (!nh->should_offload)
3029 continue;
3030 weight += nh->norm_nh_weight;
3031 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3032 nh->num_adj_entries = upper_bound - lower_bound;
3033 lower_bound = upper_bound;
3034 }
3035}
3036
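/* Editor's worked example for the two helpers above (not driver code): with
 * hypothetical nexthop weights 10, 20 and 30, the gcd is 10, the normalized
 * weights are 1, 2 and 3, and sum_norm_weight is 6.  Since 6 <= 64 the ECMP
 * size stays 6, so the nexthops get exactly 1, 2 and 3 adjacency entries.
 * The standalone program below reproduces that arithmetic.
 */
#include <stdio.h>

static int gcd2(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	int w[] = { 10, 20, 30 }, n = 3, i, g = 0, sum = 0;
	int ecmp_size, weight = 0, lower = 0;

	for (i = 0; i < n; i++)
		g = g ? gcd2(w[i], g) : w[i];
	for (i = 0; i < n; i++)
		sum += w[i] / g;	/* normalized weights: 1, 2, 3 */
	ecmp_size = sum;		/* 6 <= 64, so no rounding up */
	for (i = 0; i < n; i++) {
		int upper;

		weight += w[i] / g;
		/* same rounding as DIV_ROUND_CLOSEST() for positive values */
		upper = (ecmp_size * weight + sum / 2) / sum;
		printf("nexthop %d: %d adjacency entries\n", i, upper - lower);
		lower = upper;
	}
	return 0;
}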
3037static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003038mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3039 struct mlxsw_sp_nexthop_group *nh_grp)
3040{
Ido Schimmeleb789982017-10-22 23:11:48 +02003041 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003042 struct mlxsw_sp_nexthop *nh;
3043 bool offload_change = false;
3044 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003045 bool old_adj_index_valid;
3046 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003047 int i;
3048 int err;
3049
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003050 if (!nh_grp->gateway) {
3051 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3052 return;
3053 }
3054
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003055 for (i = 0; i < nh_grp->count; i++) {
3056 nh = &nh_grp->nexthops[i];
3057
Petr Machata56b8a9e2017-07-31 09:27:29 +02003058 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003059 offload_change = true;
3060 if (nh->should_offload)
3061 nh->update = 1;
3062 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003063 }
3064 if (!offload_change) {
3065 /* Nothing was added or removed, so no need to reallocate. Just
3066 * update MAC on existing adjacency indexes.
3067 */
Petr Machata35225e42017-09-02 23:49:22 +02003068 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003069 if (err) {
3070 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3071 goto set_trap;
3072 }
3073 return;
3074 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003075 mlxsw_sp_nexthop_group_normalize(nh_grp);
3076 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003077 /* No neigh of this group is connected so we just set
3078	 * the trap and let everything flow through the kernel.
3079 */
3080 goto set_trap;
3081
Ido Schimmeleb789982017-10-22 23:11:48 +02003082 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003083 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3084 if (err)
3085 /* No valid allocation size available. */
3086 goto set_trap;
3087
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003088 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3089 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003090 /* We ran out of KVD linear space, just set the
3091	 * trap and let everything flow through the kernel.
3092 */
3093 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3094 goto set_trap;
3095 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003096 old_adj_index_valid = nh_grp->adj_index_valid;
3097 old_adj_index = nh_grp->adj_index;
3098 old_ecmp_size = nh_grp->ecmp_size;
3099 nh_grp->adj_index_valid = 1;
3100 nh_grp->adj_index = adj_index;
3101 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003102 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003103 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003104 if (err) {
3105 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3106 goto set_trap;
3107 }
3108
3109 if (!old_adj_index_valid) {
3110 /* The trap was set for fib entries, so we have to call
3111	 * fib entry update to unset it and use the adjacency index.
3112 */
3113 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3114 if (err) {
3115 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3116 goto set_trap;
3117 }
3118 return;
3119 }
3120
3121 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3122 old_adj_index, old_ecmp_size);
3123 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3124 if (err) {
3125 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3126 goto set_trap;
3127 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003128
3129 /* Offload state within the group changed, so update the flags. */
3130 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3131
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003132 return;
3133
3134set_trap:
3135 old_adj_index_valid = nh_grp->adj_index_valid;
3136 nh_grp->adj_index_valid = 0;
3137 for (i = 0; i < nh_grp->count; i++) {
3138 nh = &nh_grp->nexthops[i];
3139 nh->offloaded = 0;
3140 }
3141 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3142 if (err)
3143 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3144 if (old_adj_index_valid)
3145 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3146}
3147
3148static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3149 bool removing)
3150{
Petr Machata213666a2017-07-31 09:27:30 +02003151 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003152 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02003153 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003154 nh->should_offload = 0;
3155 nh->update = 1;
3156}
3157
3158static void
3159mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3160 struct mlxsw_sp_neigh_entry *neigh_entry,
3161 bool removing)
3162{
3163 struct mlxsw_sp_nexthop *nh;
3164
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003165 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3166 neigh_list_node) {
3167 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3168 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3169 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003170}
3171
Ido Schimmel9665b742017-02-08 11:16:42 +01003172static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003173 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003174{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003175 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003176 return;
3177
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003178 nh->rif = rif;
3179 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003180}
3181
3182static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3183{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003184 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003185 return;
3186
3187 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003188 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003189}
3190
Ido Schimmela8c97012017-02-08 11:16:35 +01003191static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3192 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003193{
3194 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003195 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003196 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003197 int err;
3198
Ido Schimmelad178c82017-02-08 11:16:40 +01003199 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003200 return 0;
3201
Jiri Pirko33b13412016-11-10 12:31:04 +01003202	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003203	 * not destroyed before the nexthop entry is done with it.
Jiri Pirko33b13412016-11-10 12:31:04 +01003204 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003205 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003206 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003207 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003208 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003209 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3210 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003211 if (IS_ERR(n))
3212 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003213 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003214 }
3215 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3216 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003217 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3218 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003219 err = -EINVAL;
3220 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003221 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003222 }
Yotam Gigib2157142016-07-05 11:27:51 +02003223
3224 /* If that is the first nexthop connected to that neigh, add to
3225 * nexthop_neighs_list
3226 */
3227 if (list_empty(&neigh_entry->nexthop_list))
3228 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003229 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003230
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003231 nh->neigh_entry = neigh_entry;
3232 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3233 read_lock_bh(&n->lock);
3234 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003235 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003236 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003237 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003238
3239 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003240
3241err_neigh_entry_create:
3242 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003243 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003244}
3245
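/* Editor's sketch of the reference pattern used above, assuming kernel
 * context: neigh_lookup(), neigh_create(), neigh_event_send() and arp_tbl
 * come from headers this file already includes.  example_neigh_get() and
 * its IPv4-only gateway argument are illustrative, not part of the driver.
 */
static struct neighbour *example_neigh_get(struct net_device *dev, __be32 *gw)
{
	struct neighbour *n;

	n = neigh_lookup(&arp_tbl, gw, dev);	/* returns a referenced neigh on hit */
	if (!n) {
		n = neigh_create(&arp_tbl, gw, dev);	/* also returned referenced */
		if (IS_ERR(n))
			return n;
		neigh_event_send(n, NULL);	/* kick off resolution */
	}
	return n;	/* caller must neigh_release(n) when done with it */
}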
Ido Schimmela8c97012017-02-08 11:16:35 +01003246static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3247 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003248{
3249 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003250 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003251
Ido Schimmelb8399a12017-02-08 11:16:33 +01003252 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003253 return;
3254 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003255
Ido Schimmel58312122016-12-23 09:32:50 +01003256 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003257 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003258 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003259
3260 /* If that is the last nexthop connected to that neigh, remove from
3261 * nexthop_neighs_list
3262 */
Ido Schimmele58be792017-02-08 11:16:28 +01003263 if (list_empty(&neigh_entry->nexthop_list))
3264 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003265
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003266 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3267 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3268
3269 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003270}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003271
Petr Machata1012b9a2017-09-02 23:49:23 +02003272static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003273 struct mlxsw_sp_nexthop *nh,
3274 struct net_device *ol_dev)
3275{
3276 if (!nh->nh_grp->gateway || nh->ipip_entry)
3277 return 0;
3278
Petr Machata4cccb732017-10-16 16:26:39 +02003279 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3280 if (!nh->ipip_entry)
3281 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003282
3283 __mlxsw_sp_nexthop_neigh_update(nh, false);
3284 return 0;
3285}
3286
3287static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3288 struct mlxsw_sp_nexthop *nh)
3289{
3290 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3291
3292 if (!ipip_entry)
3293 return;
3294
3295 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003296 nh->ipip_entry = NULL;
3297}
3298
3299static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3300 const struct fib_nh *fib_nh,
3301 enum mlxsw_sp_ipip_type *p_ipipt)
3302{
3303 struct net_device *dev = fib_nh->nh_dev;
3304
3305 return dev &&
3306 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3307 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3308}
3309
Petr Machata35225e42017-09-02 23:49:22 +02003310static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3311 struct mlxsw_sp_nexthop *nh)
3312{
3313 switch (nh->type) {
3314 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3315 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3316 mlxsw_sp_nexthop_rif_fini(nh);
3317 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003318 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003319 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003320 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3321 break;
Petr Machata35225e42017-09-02 23:49:22 +02003322 }
3323}
3324
3325static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3326 struct mlxsw_sp_nexthop *nh,
3327 struct fib_nh *fib_nh)
3328{
Petr Machata1012b9a2017-09-02 23:49:23 +02003329 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003330 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003331 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003332 struct mlxsw_sp_rif *rif;
3333 int err;
3334
Petr Machata1012b9a2017-09-02 23:49:23 +02003335 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3336 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3337 MLXSW_SP_L3_PROTO_IPV4)) {
3338 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003339 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003340 if (err)
3341 return err;
3342 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3343 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003344 }
3345
Petr Machata35225e42017-09-02 23:49:22 +02003346 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3347 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3348 if (!rif)
3349 return 0;
3350
3351 mlxsw_sp_nexthop_rif_init(nh, rif);
3352 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3353 if (err)
3354 goto err_neigh_init;
3355
3356 return 0;
3357
3358err_neigh_init:
3359 mlxsw_sp_nexthop_rif_fini(nh);
3360 return err;
3361}
3362
3363static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3364 struct mlxsw_sp_nexthop *nh)
3365{
3366 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3367}
3368
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003369static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3370 struct mlxsw_sp_nexthop_group *nh_grp,
3371 struct mlxsw_sp_nexthop *nh,
3372 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003373{
3374 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003375 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003376 int err;
3377
3378 nh->nh_grp = nh_grp;
3379 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003380#ifdef CONFIG_IP_ROUTE_MULTIPATH
3381 nh->nh_weight = fib_nh->nh_weight;
3382#else
3383 nh->nh_weight = 1;
3384#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003385 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003386 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3387 if (err)
3388 return err;
3389
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003390 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003391 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3392
Ido Schimmel97989ee2017-03-10 08:53:38 +01003393 if (!dev)
3394 return 0;
3395
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003396 in_dev = __in_dev_get_rtnl(dev);
3397 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3398 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3399 return 0;
3400
Petr Machata35225e42017-09-02 23:49:22 +02003401 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003402 if (err)
3403 goto err_nexthop_neigh_init;
3404
3405 return 0;
3406
3407err_nexthop_neigh_init:
3408 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3409 return err;
3410}
3411
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003412static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3413 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003414{
Petr Machata35225e42017-09-02 23:49:22 +02003415 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003416 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003417 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003418 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003419}
3420
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003421static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3422 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003423{
3424 struct mlxsw_sp_nexthop_key key;
3425 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003426
Ido Schimmel9011b672017-05-16 19:38:25 +02003427 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003428 return;
3429
3430 key.fib_nh = fib_nh;
3431 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3432 if (WARN_ON_ONCE(!nh))
3433 return;
3434
Ido Schimmelad178c82017-02-08 11:16:40 +01003435 switch (event) {
3436 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003437 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003438 break;
3439 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003440 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003441 break;
3442 }
3443
3444 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3445}
3446
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003447static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3448 struct mlxsw_sp_rif *rif)
3449{
3450 struct mlxsw_sp_nexthop *nh;
3451
3452 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3453 __mlxsw_sp_nexthop_neigh_update(nh, false);
3454 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3455 }
3456}
3457
Ido Schimmel9665b742017-02-08 11:16:42 +01003458static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003459 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003460{
3461 struct mlxsw_sp_nexthop *nh, *tmp;
3462
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003463 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003464 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003465 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3466 }
3467}
3468
Petr Machata9b014512017-09-02 23:49:20 +02003469static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3470 const struct fib_info *fi)
3471{
Petr Machata1012b9a2017-09-02 23:49:23 +02003472 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3473 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003474}
3475
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003476static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003477mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003478{
3479 struct mlxsw_sp_nexthop_group *nh_grp;
3480 struct mlxsw_sp_nexthop *nh;
3481 struct fib_nh *fib_nh;
3482 size_t alloc_size;
3483 int i;
3484 int err;
3485
3486 alloc_size = sizeof(*nh_grp) +
3487 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3488 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3489 if (!nh_grp)
3490 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003491 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003492 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003493 nh_grp->neigh_tbl = &arp_tbl;
3494
Petr Machata9b014512017-09-02 23:49:20 +02003495 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003496 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003497 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003498 for (i = 0; i < nh_grp->count; i++) {
3499 nh = &nh_grp->nexthops[i];
3500 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003501 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003502 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003503 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003504 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003505 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3506 if (err)
3507 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003508 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3509 return nh_grp;
3510
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003511err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003512err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003513 for (i--; i >= 0; i--) {
3514 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003515 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003516 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003517 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003518 kfree(nh_grp);
3519 return ERR_PTR(err);
3520}
3521
3522static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003523mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3524 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003525{
3526 struct mlxsw_sp_nexthop *nh;
3527 int i;
3528
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003529 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003530 for (i = 0; i < nh_grp->count; i++) {
3531 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003532 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003533 }
Ido Schimmel58312122016-12-23 09:32:50 +01003534 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3535 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003536 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003537 kfree(nh_grp);
3538}
3539
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003540static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3541 struct mlxsw_sp_fib_entry *fib_entry,
3542 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003543{
3544 struct mlxsw_sp_nexthop_group *nh_grp;
3545
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003546 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003547 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003548 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003549 if (IS_ERR(nh_grp))
3550 return PTR_ERR(nh_grp);
3551 }
3552 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3553 fib_entry->nh_group = nh_grp;
3554 return 0;
3555}
3556
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003557static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3558 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003559{
3560 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3561
3562 list_del(&fib_entry->nexthop_group_node);
3563 if (!list_empty(&nh_grp->fib_list))
3564 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003565 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003566}
3567
Ido Schimmel013b20f2017-02-08 11:16:36 +01003568static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003569mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3570{
3571 struct mlxsw_sp_fib4_entry *fib4_entry;
3572
3573 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3574 common);
3575 return !fib4_entry->tos;
3576}
3577
3578static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003579mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3580{
3581 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3582
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003583 switch (fib_entry->fib_node->fib->proto) {
3584 case MLXSW_SP_L3_PROTO_IPV4:
3585 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3586 return false;
3587 break;
3588 case MLXSW_SP_L3_PROTO_IPV6:
3589 break;
3590 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003591
Ido Schimmel013b20f2017-02-08 11:16:36 +01003592 switch (fib_entry->type) {
3593 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3594 return !!nh_group->adj_index_valid;
3595 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003596 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003597 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3598 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003599 default:
3600 return false;
3601 }
3602}
3603
Ido Schimmel428b8512017-08-03 13:28:28 +02003604static struct mlxsw_sp_nexthop *
3605mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3606 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3607{
3608 int i;
3609
3610 for (i = 0; i < nh_grp->count; i++) {
3611 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3612 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3613
3614 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3615 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3616 &rt->rt6i_gateway))
3617 return nh;
3619 }
3620
3621 return NULL;
3622}
3623
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003624static void
3625mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3626{
3627 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3628 int i;
3629
Petr Machata4607f6d2017-09-02 23:49:25 +02003630 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3631 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003632 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3633 return;
3634 }
3635
3636 for (i = 0; i < nh_grp->count; i++) {
3637 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3638
3639 if (nh->offloaded)
3640 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3641 else
3642 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3643 }
3644}
3645
3646static void
3647mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3648{
3649 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3650 int i;
3651
3652 for (i = 0; i < nh_grp->count; i++) {
3653 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3654
3655 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3656 }
3657}
3658
Ido Schimmel428b8512017-08-03 13:28:28 +02003659static void
3660mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3661{
3662 struct mlxsw_sp_fib6_entry *fib6_entry;
3663 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3664
3665 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3666 common);
3667
3668 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3669 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003670 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003671 return;
3672 }
3673
3674 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3675 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3676 struct mlxsw_sp_nexthop *nh;
3677
3678 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3679 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003680 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003681 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003682 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003683 }
3684}
3685
3686static void
3687mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3688{
3689 struct mlxsw_sp_fib6_entry *fib6_entry;
3690 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3691
3692 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3693 common);
3694 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3695 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3696
Ido Schimmelfe400792017-08-15 09:09:49 +02003697 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003698 }
3699}
3700
Ido Schimmel013b20f2017-02-08 11:16:36 +01003701static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3702{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003703 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003704 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003705 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003706 break;
3707 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003708 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3709 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003710 }
3711}
3712
3713static void
3714mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3715{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003716 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003717 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003718 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003719 break;
3720 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003721 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3722 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003723 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003724}
3725
3726static void
3727mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3728 enum mlxsw_reg_ralue_op op, int err)
3729{
3730 switch (op) {
3731 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003732 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3733 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3734 if (err)
3735 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003736 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003737 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003738 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003739 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3740 return;
3741 default:
3742 return;
3743 }
3744}
3745
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003746static void
3747mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3748 const struct mlxsw_sp_fib_entry *fib_entry,
3749 enum mlxsw_reg_ralue_op op)
3750{
3751 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3752 enum mlxsw_reg_ralxx_protocol proto;
3753 u32 *p_dip;
3754
3755 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3756
3757 switch (fib->proto) {
3758 case MLXSW_SP_L3_PROTO_IPV4:
3759 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3760 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3761 fib_entry->fib_node->key.prefix_len,
3762 *p_dip);
3763 break;
3764 case MLXSW_SP_L3_PROTO_IPV6:
3765 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3766 fib_entry->fib_node->key.prefix_len,
3767 fib_entry->fib_node->key.addr);
3768 break;
3769 }
3770}
3771
3772static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3773 struct mlxsw_sp_fib_entry *fib_entry,
3774 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003775{
3776 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003777 enum mlxsw_reg_ralue_trap_action trap_action;
3778 u16 trap_id = 0;
3779 u32 adjacency_index = 0;
3780 u16 ecmp_size = 0;
3781
3782	/* If the nexthop group adjacency index is valid, use it with the
3783	 * provided ECMP size. Otherwise, set up a trap and pass the
3784	 * traffic to the kernel.
3785 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003786 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003787 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3788 adjacency_index = fib_entry->nh_group->adj_index;
3789 ecmp_size = fib_entry->nh_group->ecmp_size;
3790 } else {
3791 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3792 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3793 }
3794
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003795 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003796 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3797 adjacency_index, ecmp_size);
3798 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3799}
3800
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003801static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3802 struct mlxsw_sp_fib_entry *fib_entry,
3803 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003804{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003805 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003806 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003807 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003808 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003809 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003810
3811 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3812 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003813 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003814 } else {
3815 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3816 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3817 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003818
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003819 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003820 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3821 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003822 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3823}
3824
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003825static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3826 struct mlxsw_sp_fib_entry *fib_entry,
3827 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003828{
3829 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003830
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003831 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003832 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3833 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3834}
3835
Petr Machata4607f6d2017-09-02 23:49:25 +02003836static int
3837mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3838 struct mlxsw_sp_fib_entry *fib_entry,
3839 enum mlxsw_reg_ralue_op op)
3840{
3841 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3842 const struct mlxsw_sp_ipip_ops *ipip_ops;
3843
3844 if (WARN_ON(!ipip_entry))
3845 return -EINVAL;
3846
3847 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3848 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3849 fib_entry->decap.tunnel_index);
3850}
3851
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003852static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3853 struct mlxsw_sp_fib_entry *fib_entry,
3854 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003855{
3856 switch (fib_entry->type) {
3857 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003858 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003859 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003860 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003861 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003862 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003863 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3864 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3865 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003866 }
3867 return -EINVAL;
3868}
3869
3870static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3871 struct mlxsw_sp_fib_entry *fib_entry,
3872 enum mlxsw_reg_ralue_op op)
3873{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003874 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003875
Ido Schimmel013b20f2017-02-08 11:16:36 +01003876 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003877
Ido Schimmel013b20f2017-02-08 11:16:36 +01003878 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003879}
3880
3881static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3882 struct mlxsw_sp_fib_entry *fib_entry)
3883{
Jiri Pirko7146da32016-09-01 10:37:41 +02003884 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3885 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003886}
3887
3888static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3889 struct mlxsw_sp_fib_entry *fib_entry)
3890{
3891 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3892 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3893}
3894
Jiri Pirko61c503f2016-07-04 08:23:11 +02003895static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003896mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3897 const struct fib_entry_notifier_info *fen_info,
3898 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003899{
Petr Machata4607f6d2017-09-02 23:49:25 +02003900 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3901 struct net_device *dev = fen_info->fi->fib_dev;
3902 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003903 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003904
Ido Schimmel97989ee2017-03-10 08:53:38 +01003905 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003906 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003907 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3908 MLXSW_SP_L3_PROTO_IPV4, dip);
3909 if (ipip_entry) {
3910 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3911 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3912 fib_entry,
3913 ipip_entry);
3914 }
3915 /* fall through */
3916 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003917 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3918 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003919 case RTN_UNREACHABLE: /* fall through */
3920 case RTN_BLACKHOLE: /* fall through */
3921 case RTN_PROHIBIT:
3922 /* Packets hitting these routes need to be trapped, but
3923	 * can be trapped with a lower priority than packets directed
3924 * at the host, so use action type local instead of trap.
3925 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003926 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003927 return 0;
3928 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003929 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003930 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003931 else
3932 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003933 return 0;
3934 default:
3935 return -EINVAL;
3936 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003937}
3938
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003939static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003940mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3941 struct mlxsw_sp_fib_node *fib_node,
3942 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003943{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003944 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003945 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003946 int err;
3947
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003948 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3949 if (!fib4_entry)
3950 return ERR_PTR(-ENOMEM);
3951 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003952
3953 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3954 if (err)
3955 goto err_fib4_entry_type_set;
3956
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003957 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003958 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003959 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003960
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003961 fib4_entry->prio = fen_info->fi->fib_priority;
3962 fib4_entry->tb_id = fen_info->tb_id;
3963 fib4_entry->type = fen_info->type;
3964 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003965
3966 fib_entry->fib_node = fib_node;
3967
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003968 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003969
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003970err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003971err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003972 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003973 return ERR_PTR(err);
3974}
3975
3976static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003977 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003978{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003979 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003980 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003981}
3982
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003983static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003984mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3985 const struct fib_entry_notifier_info *fen_info)
3986{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003987 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003988 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003989 struct mlxsw_sp_fib *fib;
3990 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003991
Ido Schimmel160e22a2017-07-18 10:10:20 +02003992 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3993 if (!vr)
3994 return NULL;
3995 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3996
3997 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3998 sizeof(fen_info->dst),
3999 fen_info->dst_len);
4000 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004001 return NULL;
4002
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004003 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4004 if (fib4_entry->tb_id == fen_info->tb_id &&
4005 fib4_entry->tos == fen_info->tos &&
4006 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004007 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4008 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004009 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004010 }
4011 }
4012
4013 return NULL;
4014}
4015
4016static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4017 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4018 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4019 .key_len = sizeof(struct mlxsw_sp_fib_key),
4020 .automatic_shrinking = true,
4021};
4022
4023static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4024 struct mlxsw_sp_fib_node *fib_node)
4025{
4026 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4027 mlxsw_sp_fib_ht_params);
4028}
4029
4030static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4031 struct mlxsw_sp_fib_node *fib_node)
4032{
4033 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4034 mlxsw_sp_fib_ht_params);
4035}
4036
4037static struct mlxsw_sp_fib_node *
4038mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4039 size_t addr_len, unsigned char prefix_len)
4040{
4041 struct mlxsw_sp_fib_key key;
4042
4043 memset(&key, 0, sizeof(key));
4044 memcpy(key.addr, addr, addr_len);
4045 key.prefix_len = prefix_len;
4046 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4047}
4048
4049static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004050mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004051 size_t addr_len, unsigned char prefix_len)
4052{
4053 struct mlxsw_sp_fib_node *fib_node;
4054
4055 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4056 if (!fib_node)
4057 return NULL;
4058
4059 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004060 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004061 memcpy(fib_node->key.addr, addr, addr_len);
4062 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004063
4064 return fib_node;
4065}
4066
4067static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4068{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004069 list_del(&fib_node->list);
4070 WARN_ON(!list_empty(&fib_node->entry_list));
4071 kfree(fib_node);
4072}
4073
4074static bool
4075mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4076 const struct mlxsw_sp_fib_entry *fib_entry)
4077{
4078 return list_first_entry(&fib_node->entry_list,
4079 struct mlxsw_sp_fib_entry, list) == fib_entry;
4080}
4081
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004082static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4083 struct mlxsw_sp_fib *fib,
4084 struct mlxsw_sp_fib_node *fib_node)
4085{
4086 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
4087 struct mlxsw_sp_lpm_tree *lpm_tree;
4088 int err;
4089
4090	/* Since the tree is shared between all virtual routers, we must
4091	 * make sure it contains all the required prefix lengths. The required
4092	 * usage can be computed by either adding the new prefix length to the
4093 * existing prefix usage of a bound tree, or by aggregating the
4094 * prefix lengths across all virtual routers and adding the new
4095 * one as well.
4096 */
4097 if (fib->lpm_tree)
4098 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
4099 &fib->lpm_tree->prefix_usage);
4100 else
4101 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
4102 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4103
4104 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4105 fib->proto);
4106 if (IS_ERR(lpm_tree))
4107 return PTR_ERR(lpm_tree);
4108
4109 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
4110 return 0;
4111
4112 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4113 if (err)
4114 return err;
4115
4116 return 0;
4117}
4118
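/* Editor's illustration of the prefix-usage aggregation described above,
 * as a standalone userspace sketch.  The bitmask model and the example
 * prefix lengths are assumptions, not the driver's real data structures.
 */
#include <stdio.h>

int main(void)
{
	/* Model a prefix usage as a bitmask over prefix lengths 0..32. */
	unsigned long long vr_a = (1ULL << 0) | (1ULL << 24);	/* VR A: /0, /24 */
	unsigned long long vr_b = (1ULL << 0) | (1ULL << 16);	/* VR B: /0, /16 */
	unsigned long long required = vr_a | vr_b;	/* aggregate across VRs */

	required |= 1ULL << 32;		/* new fib node with a /32 prefix */
	printf("shared LPM tree must cover prefix-length mask 0x%llx\n",
	       required);
	return 0;
}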
4119static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4120 struct mlxsw_sp_fib *fib)
4121{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004122 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
4123 return;
4124 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
4125 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
4126 fib->lpm_tree = NULL;
4127}
4128
Ido Schimmel9aecce12017-02-09 10:28:42 +01004129static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
4130{
4131 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004132 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004133
4134 if (fib->prefix_ref_count[prefix_len]++ == 0)
4135 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4136}
4137
4138static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4139{
4140 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004141 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004142
4143 if (--fib->prefix_ref_count[prefix_len] == 0)
4144 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4145}
4146
Ido Schimmel76610eb2017-03-10 08:53:41 +01004147static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4148 struct mlxsw_sp_fib_node *fib_node,
4149 struct mlxsw_sp_fib *fib)
4150{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004151 int err;
4152
4153 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4154 if (err)
4155 return err;
4156 fib_node->fib = fib;
4157
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004158 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
4159 if (err)
4160 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004161
4162 mlxsw_sp_fib_node_prefix_inc(fib_node);
4163
4164 return 0;
4165
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004166err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004167 fib_node->fib = NULL;
4168 mlxsw_sp_fib_node_remove(fib, fib_node);
4169 return err;
4170}
4171
4172static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4173 struct mlxsw_sp_fib_node *fib_node)
4174{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004175 struct mlxsw_sp_fib *fib = fib_node->fib;
4176
4177 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004178 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004179 fib_node->fib = NULL;
4180 mlxsw_sp_fib_node_remove(fib, fib_node);
4181}
4182
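/* Look up the FIB node for the given prefix in the relevant virtual
 * router, creating both if necessary. The node keeps the virtual router
 * referenced until mlxsw_sp_fib_node_put() releases it.
 */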
Ido Schimmel9aecce12017-02-09 10:28:42 +01004183static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004184mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4185 size_t addr_len, unsigned char prefix_len,
4186 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004187{
4188 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004189 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004190 struct mlxsw_sp_vr *vr;
4191 int err;
4192
David Ahernf8fa9b42017-10-18 09:56:56 -07004193 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004194 if (IS_ERR(vr))
4195 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004196 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004197
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004198 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004199 if (fib_node)
4200 return fib_node;
4201
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004202 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004203 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004204 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004205 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004206 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004207
Ido Schimmel76610eb2017-03-10 08:53:41 +01004208 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4209 if (err)
4210 goto err_fib_node_init;
4211
Ido Schimmel9aecce12017-02-09 10:28:42 +01004212 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004213
Ido Schimmel76610eb2017-03-10 08:53:41 +01004214err_fib_node_init:
4215 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004216err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004217 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004218 return ERR_PTR(err);
4219}
4220
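/* Destroy the FIB node once its entry list is empty and release the
 * reference taken on the virtual router by mlxsw_sp_fib_node_get().
 */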
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004221static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4222 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004223{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004224 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004225
Ido Schimmel9aecce12017-02-09 10:28:42 +01004226 if (!list_empty(&fib_node->entry_list))
4227 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004228 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004229 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004230 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004231}
4232
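/* Entries of a FIB node are kept sorted by descending table ID and TOS
 * and by ascending priority (metric). Return the existing entry that a
 * new entry should be inserted before, or NULL if there is none.
 */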
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004233static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004234mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004235 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004236{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004237 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004238
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004239 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4240 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004241 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004242 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004243 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004244 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004245 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004246 if (fib4_entry->prio >= new4_entry->prio ||
4247 fib4_entry->tos < new4_entry->tos)
4248 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004249 }
4250
4251 return NULL;
4252}
4253
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004254static int
4255mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4256 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004257{
4258 struct mlxsw_sp_fib_node *fib_node;
4259
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004260 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004261 return -EINVAL;
4262
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004263 fib_node = fib4_entry->common.fib_node;
4264 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4265 common.list) {
4266 if (fib4_entry->tb_id != new4_entry->tb_id ||
4267 fib4_entry->tos != new4_entry->tos ||
4268 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004269 break;
4270 }
4271
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004273 return 0;
4274}
4275
Ido Schimmel9aecce12017-02-09 10:28:42 +01004276static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004277mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004278 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004279{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004280 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004281 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004282
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004283 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004284
Ido Schimmel4283bce2017-02-09 10:28:43 +01004285 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004286 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4287 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004288 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004289
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004290	/* Insert the new entry before the replaced one, so that the
4291	 * replaced entry can be found and removed afterwards.
4292	 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004293 if (fib4_entry) {
4294 list_add_tail(&new4_entry->common.list,
4295 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004296 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004297 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004298
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004299 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4300 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004301 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004302 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004303 }
4304
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004305 if (fib4_entry)
4306 list_add(&new4_entry->common.list,
4307 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004308 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004309 list_add(&new4_entry->common.list,
4310 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004311 }
4312
4313 return 0;
4314}
4315
4316static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004317mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004318{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004319 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004320}
4321
Ido Schimmel80c238f2017-07-18 10:10:29 +02004322static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4323 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004324{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004325 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4326
Ido Schimmel9aecce12017-02-09 10:28:42 +01004327 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4328 return 0;
4329
4330 /* To prevent packet loss, overwrite the previously offloaded
4331 * entry.
4332 */
4333 if (!list_is_singular(&fib_node->entry_list)) {
4334 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4335 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4336
4337 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4338 }
4339
4340 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4341}
4342
Ido Schimmel80c238f2017-07-18 10:10:29 +02004343static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4344 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004345{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004346 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4347
Ido Schimmel9aecce12017-02-09 10:28:42 +01004348 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4349 return;
4350
4351 /* Promote the next entry by overwriting the deleted entry */
4352 if (!list_is_singular(&fib_node->entry_list)) {
4353 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4354 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4355
4356 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4357 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4358 return;
4359 }
4360
4361 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4362}
4363
4364static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004365 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004366 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004367{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004368 int err;
4369
Ido Schimmel9efbee62017-07-18 10:10:28 +02004370 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004371 if (err)
4372 return err;
4373
Ido Schimmel80c238f2017-07-18 10:10:29 +02004374 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004375 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004376 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004377
Ido Schimmel9aecce12017-02-09 10:28:42 +01004378 return 0;
4379
Ido Schimmel80c238f2017-07-18 10:10:29 +02004380err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004381 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004382 return err;
4383}
4384
4385static void
4386mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004387 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004388{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004389 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004390 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004391
4392 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4393 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004394}
4395
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004396static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004397 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004398 bool replace)
4399{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004400 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4401 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004402
4403 if (!replace)
4404 return;
4405
4406 /* We inserted the new entry before replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004407 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004408
4409 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4410 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004411 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004412}
4413
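/* Reflect an IPv4 route notification into the device: get the FIB node
 * for the prefix, create the route's entry and link it into the node's
 * entry list according to the replace / append semantics of the event.
 */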
Ido Schimmel9aecce12017-02-09 10:28:42 +01004414static int
4415mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004416 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004417 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004418{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004419 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004420 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004421 int err;
4422
Ido Schimmel9011b672017-05-16 19:38:25 +02004423 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004424 return 0;
4425
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004426 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4427 &fen_info->dst, sizeof(fen_info->dst),
4428 fen_info->dst_len,
4429 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004430 if (IS_ERR(fib_node)) {
4431 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4432 return PTR_ERR(fib_node);
4433 }
4434
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004435 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4436 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004437 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004438 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004439 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004440 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004441
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004442 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004443 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004444 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004445 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4446 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004447 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004448
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004449 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004450
Jiri Pirko61c503f2016-07-04 08:23:11 +02004451 return 0;
4452
Ido Schimmel9aecce12017-02-09 10:28:42 +01004453err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004454 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004455err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004456 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004457 return err;
4458}
4459
Jiri Pirko37956d72016-10-20 16:05:43 +02004460static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4461 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004462{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004463 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004464 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004465
Ido Schimmel9011b672017-05-16 19:38:25 +02004466 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004467 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004468
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004469 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4470 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004471 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004472 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004473
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004474 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4475 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004476 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004477}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004478
Ido Schimmel428b8512017-08-03 13:28:28 +02004479static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4480{
4481 /* Packets with link-local destination IP arriving to the router
4482 * are trapped to the CPU, so no need to program specific routes
4483 * for them.
4484 */
4485 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4486 return true;
4487
4488 /* Multicast routes aren't supported, so ignore them. Neighbour
4489 * Discovery packets are specifically trapped.
4490 */
4491 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4492 return true;
4493
4494 /* Cloned routes are irrelevant in the forwarding path. */
4495 if (rt->rt6i_flags & RTF_CACHE)
4496 return true;
4497
4498 return false;
4499}
4500
4501static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4502{
4503 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4504
4505 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4506 if (!mlxsw_sp_rt6)
4507 return ERR_PTR(-ENOMEM);
4508
4509 /* In case of route replace, replaced route is deleted with
4510 * no notification. Take reference to prevent accessing freed
4511 * memory.
4512 */
4513 mlxsw_sp_rt6->rt = rt;
4514 rt6_hold(rt);
4515
4516 return mlxsw_sp_rt6;
4517}
4518
4519#if IS_ENABLED(CONFIG_IPV6)
4520static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4521{
4522 rt6_release(rt);
4523}
4524#else
4525static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4526{
4527}
4528#endif
4529
4530static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4531{
4532 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4533 kfree(mlxsw_sp_rt6);
4534}
4535
4536static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4537{
4538	/* Only gateway routes not installed by RA can be multipath members;
4539	 * RTF_CACHE routes were already ignored.
4540	 */
4539 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4540}
4541
4542static struct rt6_info *
4543mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4544{
4545 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4546 list)->rt;
4547}
4548
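/* Find an existing multipath-capable entry with the same table ID and
 * metric that the new IPv6 route can be appended to as another nexthop.
 */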
4549static struct mlxsw_sp_fib6_entry *
4550mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004551 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004552{
4553 struct mlxsw_sp_fib6_entry *fib6_entry;
4554
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004555 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004556 return NULL;
4557
4558 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4559 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4560
4561 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4562 * virtual router.
4563 */
4564 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4565 continue;
4566 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4567 break;
4568 if (rt->rt6i_metric < nrt->rt6i_metric)
4569 continue;
4570 if (rt->rt6i_metric == nrt->rt6i_metric &&
4571 mlxsw_sp_fib6_rt_can_mp(rt))
4572 return fib6_entry;
4573 if (rt->rt6i_metric > nrt->rt6i_metric)
4574 break;
4575 }
4576
4577 return NULL;
4578}
4579
4580static struct mlxsw_sp_rt6 *
4581mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4582 const struct rt6_info *rt)
4583{
4584 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4585
4586 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4587 if (mlxsw_sp_rt6->rt == rt)
4588 return mlxsw_sp_rt6;
4589 }
4590
4591 return NULL;
4592}
4593
Petr Machata8f28a302017-09-02 23:49:24 +02004594static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4595 const struct rt6_info *rt,
4596 enum mlxsw_sp_ipip_type *ret)
4597{
4598 return rt->dst.dev &&
4599 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4600}
4601
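/* Initialize the nexthop according to its type: a nexthop egressing
 * through an offloadable IP-in-IP device is bound to the tunnel's
 * loopback RIF, while an Ethernet nexthop is resolved via its RIF and
 * neighbour entry.
 */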
Petr Machata35225e42017-09-02 23:49:22 +02004602static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4603 struct mlxsw_sp_nexthop_group *nh_grp,
4604 struct mlxsw_sp_nexthop *nh,
4605 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004606{
Petr Machata8f28a302017-09-02 23:49:24 +02004607 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004608 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004609 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004610 struct mlxsw_sp_rif *rif;
4611 int err;
4612
Petr Machata8f28a302017-09-02 23:49:24 +02004613 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4614 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4615 MLXSW_SP_L3_PROTO_IPV6)) {
4616 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004617 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004618 if (err)
4619 return err;
4620 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4621 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004622 }
4623
Petr Machata35225e42017-09-02 23:49:22 +02004624 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004625 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4626 if (!rif)
4627 return 0;
4628 mlxsw_sp_nexthop_rif_init(nh, rif);
4629
4630 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4631 if (err)
4632 goto err_nexthop_neigh_init;
4633
4634 return 0;
4635
4636err_nexthop_neigh_init:
4637 mlxsw_sp_nexthop_rif_fini(nh);
4638 return err;
4639}
4640
Petr Machata35225e42017-09-02 23:49:22 +02004641static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4642 struct mlxsw_sp_nexthop *nh)
4643{
4644 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4645}
4646
4647static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4648 struct mlxsw_sp_nexthop_group *nh_grp,
4649 struct mlxsw_sp_nexthop *nh,
4650 const struct rt6_info *rt)
4651{
4652 struct net_device *dev = rt->dst.dev;
4653
4654 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004655 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004656 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004657 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004658
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004659 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4660
Petr Machata35225e42017-09-02 23:49:22 +02004661 if (!dev)
4662 return 0;
4663 nh->ifindex = dev->ifindex;
4664
4665 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4666}
4667
Ido Schimmel428b8512017-08-03 13:28:28 +02004668static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4669 struct mlxsw_sp_nexthop *nh)
4670{
Petr Machata35225e42017-09-02 23:49:22 +02004671 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004672 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004673 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004674}
4675
Petr Machataf6050ee2017-09-02 23:49:21 +02004676static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4677 const struct rt6_info *rt)
4678{
Petr Machata8f28a302017-09-02 23:49:24 +02004679 return rt->rt6i_flags & RTF_GATEWAY ||
4680 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004681}
4682
Ido Schimmel428b8512017-08-03 13:28:28 +02004683static struct mlxsw_sp_nexthop_group *
4684mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4685 struct mlxsw_sp_fib6_entry *fib6_entry)
4686{
4687 struct mlxsw_sp_nexthop_group *nh_grp;
4688 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4689 struct mlxsw_sp_nexthop *nh;
4690 size_t alloc_size;
4691 int i = 0;
4692 int err;
4693
4694 alloc_size = sizeof(*nh_grp) +
4695 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4696 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4697 if (!nh_grp)
4698 return ERR_PTR(-ENOMEM);
4699 INIT_LIST_HEAD(&nh_grp->fib_list);
4700#if IS_ENABLED(CONFIG_IPV6)
4701 nh_grp->neigh_tbl = &nd_tbl;
4702#endif
4703 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4704 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004705 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004706 nh_grp->count = fib6_entry->nrt6;
4707 for (i = 0; i < nh_grp->count; i++) {
4708 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4709
4710 nh = &nh_grp->nexthops[i];
4711 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4712 if (err)
4713 goto err_nexthop6_init;
4714 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4715 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004716
4717 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4718 if (err)
4719 goto err_nexthop_group_insert;
4720
Ido Schimmel428b8512017-08-03 13:28:28 +02004721 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4722 return nh_grp;
4723
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004724err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004725err_nexthop6_init:
4726 for (i--; i >= 0; i--) {
4727 nh = &nh_grp->nexthops[i];
4728 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4729 }
4730 kfree(nh_grp);
4731 return ERR_PTR(err);
4732}
4733
4734static void
4735mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4736 struct mlxsw_sp_nexthop_group *nh_grp)
4737{
4738 struct mlxsw_sp_nexthop *nh;
4739 int i = nh_grp->count;
4740
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004741 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004742 for (i--; i >= 0; i--) {
4743 nh = &nh_grp->nexthops[i];
4744 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4745 }
4746 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4747 WARN_ON(nh_grp->adj_index_valid);
4748 kfree(nh_grp);
4749}
4750
4751static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4752 struct mlxsw_sp_fib6_entry *fib6_entry)
4753{
4754 struct mlxsw_sp_nexthop_group *nh_grp;
4755
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004756 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4757 if (!nh_grp) {
4758 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4759 if (IS_ERR(nh_grp))
4760 return PTR_ERR(nh_grp);
4761 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004762
4763 list_add_tail(&fib6_entry->common.nexthop_group_node,
4764 &nh_grp->fib_list);
4765 fib6_entry->common.nh_group = nh_grp;
4766
4767 return 0;
4768}
4769
4770static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4771 struct mlxsw_sp_fib_entry *fib_entry)
4772{
4773 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4774
4775 list_del(&fib_entry->nexthop_group_node);
4776 if (!list_empty(&nh_grp->fib_list))
4777 return;
4778 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4779}
4780
4781static int
4782mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4783 struct mlxsw_sp_fib6_entry *fib6_entry)
4784{
4785 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4786 int err;
4787
4788 fib6_entry->common.nh_group = NULL;
4789 list_del(&fib6_entry->common.nexthop_group_node);
4790
4791 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4792 if (err)
4793 goto err_nexthop6_group_get;
4794
4795 /* In case this entry is offloaded, then the adjacency index
4796 * currently associated with it in the device's table is that
4797 * of the old group. Start using the new one instead.
4798 */
4799 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4800 if (err)
4801 goto err_fib_node_entry_add;
4802
4803 if (list_empty(&old_nh_grp->fib_list))
4804 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4805
4806 return 0;
4807
4808err_fib_node_entry_add:
4809 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4810err_nexthop6_group_get:
4811 list_add_tail(&fib6_entry->common.nexthop_group_node,
4812 &old_nh_grp->fib_list);
4813 fib6_entry->common.nh_group = old_nh_grp;
4814 return err;
4815}
4816
4817static int
4818mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4819 struct mlxsw_sp_fib6_entry *fib6_entry,
4820 struct rt6_info *rt)
4821{
4822 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4823 int err;
4824
4825 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4826 if (IS_ERR(mlxsw_sp_rt6))
4827 return PTR_ERR(mlxsw_sp_rt6);
4828
4829 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4830 fib6_entry->nrt6++;
4831
4832 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4833 if (err)
4834 goto err_nexthop6_group_update;
4835
4836 return 0;
4837
4838err_nexthop6_group_update:
4839 fib6_entry->nrt6--;
4840 list_del(&mlxsw_sp_rt6->list);
4841 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4842 return err;
4843}
4844
4845static void
4846mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4847 struct mlxsw_sp_fib6_entry *fib6_entry,
4848 struct rt6_info *rt)
4849{
4850 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4851
4852 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4853 if (WARN_ON(!mlxsw_sp_rt6))
4854 return;
4855
4856 fib6_entry->nrt6--;
4857 list_del(&mlxsw_sp_rt6->list);
4858 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4859 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4860}
4861
Petr Machataf6050ee2017-09-02 23:49:21 +02004862static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4863 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004864 const struct rt6_info *rt)
4865{
4866 /* Packets hitting RTF_REJECT routes need to be discarded by the
4867 * stack. We can rely on their destination device not having a
4868 * RIF (it's the loopback device) and can thus use action type
4869 * local, which will cause them to be trapped with a lower
4870 * priority than packets that need to be locally received.
4871 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004872 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004873 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4874 else if (rt->rt6i_flags & RTF_REJECT)
4875 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004876 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004877 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4878 else
4879 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4880}
4881
4882static void
4883mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4884{
4885 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4886
4887 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4888 list) {
4889 fib6_entry->nrt6--;
4890 list_del(&mlxsw_sp_rt6->list);
4891 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4892 }
4893}
4894
4895static struct mlxsw_sp_fib6_entry *
4896mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4897 struct mlxsw_sp_fib_node *fib_node,
4898 struct rt6_info *rt)
4899{
4900 struct mlxsw_sp_fib6_entry *fib6_entry;
4901 struct mlxsw_sp_fib_entry *fib_entry;
4902 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4903 int err;
4904
4905 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4906 if (!fib6_entry)
4907 return ERR_PTR(-ENOMEM);
4908 fib_entry = &fib6_entry->common;
4909
4910 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4911 if (IS_ERR(mlxsw_sp_rt6)) {
4912 err = PTR_ERR(mlxsw_sp_rt6);
4913 goto err_rt6_create;
4914 }
4915
Petr Machataf6050ee2017-09-02 23:49:21 +02004916 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004917
4918 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4919 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4920 fib6_entry->nrt6 = 1;
4921 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4922 if (err)
4923 goto err_nexthop6_group_get;
4924
4925 fib_entry->fib_node = fib_node;
4926
4927 return fib6_entry;
4928
4929err_nexthop6_group_get:
4930 list_del(&mlxsw_sp_rt6->list);
4931 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4932err_rt6_create:
4933 kfree(fib6_entry);
4934 return ERR_PTR(err);
4935}
4936
4937static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4938 struct mlxsw_sp_fib6_entry *fib6_entry)
4939{
4940 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4941 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4942 WARN_ON(fib6_entry->nrt6);
4943 kfree(fib6_entry);
4944}
4945
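/* Return the existing entry that the new IPv6 route should be inserted
 * before. In case of replace, prefer an entry with the same metric and
 * the same multipath capability as the new route.
 */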
4946static struct mlxsw_sp_fib6_entry *
4947mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004948 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004949{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004950 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004951
4952 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4953 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4954
4955 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4956 continue;
4957 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4958 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004959 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4960 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4961 mlxsw_sp_fib6_rt_can_mp(nrt))
4962 return fib6_entry;
4963 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4964 fallback = fallback ?: fib6_entry;
4965 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004966 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004967 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004968 }
4969
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004970 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004971}
4972
4973static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004974mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4975 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004976{
4977 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4978 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4979 struct mlxsw_sp_fib6_entry *fib6_entry;
4980
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004981 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4982
4983 if (replace && WARN_ON(!fib6_entry))
4984 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004985
4986 if (fib6_entry) {
4987 list_add_tail(&new6_entry->common.list,
4988 &fib6_entry->common.list);
4989 } else {
4990 struct mlxsw_sp_fib6_entry *last;
4991
4992 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4993 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4994
4995 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4996 break;
4997 fib6_entry = last;
4998 }
4999
5000 if (fib6_entry)
5001 list_add(&new6_entry->common.list,
5002 &fib6_entry->common.list);
5003 else
5004 list_add(&new6_entry->common.list,
5005 &fib_node->entry_list);
5006 }
5007
5008 return 0;
5009}
5010
5011static void
5012mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5013{
5014 list_del(&fib6_entry->common.list);
5015}
5016
5017static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005018 struct mlxsw_sp_fib6_entry *fib6_entry,
5019 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005020{
5021 int err;
5022
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005023 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005024 if (err)
5025 return err;
5026
5027 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5028 if (err)
5029 goto err_fib_node_entry_add;
5030
5031 return 0;
5032
5033err_fib_node_entry_add:
5034 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5035 return err;
5036}
5037
5038static void
5039mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5040 struct mlxsw_sp_fib6_entry *fib6_entry)
5041{
5042 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5043 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5044}
5045
5046static struct mlxsw_sp_fib6_entry *
5047mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5048 const struct rt6_info *rt)
5049{
5050 struct mlxsw_sp_fib6_entry *fib6_entry;
5051 struct mlxsw_sp_fib_node *fib_node;
5052 struct mlxsw_sp_fib *fib;
5053 struct mlxsw_sp_vr *vr;
5054
5055 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5056 if (!vr)
5057 return NULL;
5058 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5059
5060 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5061 sizeof(rt->rt6i_dst.addr),
5062 rt->rt6i_dst.plen);
5063 if (!fib_node)
5064 return NULL;
5065
5066 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5067 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5068
5069 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5070 rt->rt6i_metric == iter_rt->rt6i_metric &&
5071 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5072 return fib6_entry;
5073 }
5074
5075 return NULL;
5076}
5077
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005078static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5079 struct mlxsw_sp_fib6_entry *fib6_entry,
5080 bool replace)
5081{
5082 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5083 struct mlxsw_sp_fib6_entry *replaced;
5084
5085 if (!replace)
5086 return;
5087
5088 replaced = list_next_entry(fib6_entry, common.list);
5089
5090 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5091 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5092 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5093}
5094
Ido Schimmel428b8512017-08-03 13:28:28 +02005095static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005096 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005097{
5098 struct mlxsw_sp_fib6_entry *fib6_entry;
5099 struct mlxsw_sp_fib_node *fib_node;
5100 int err;
5101
5102 if (mlxsw_sp->router->aborted)
5103 return 0;
5104
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005105 if (rt->rt6i_src.plen)
5106 return -EINVAL;
5107
Ido Schimmel428b8512017-08-03 13:28:28 +02005108 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5109 return 0;
5110
5111 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5112 &rt->rt6i_dst.addr,
5113 sizeof(rt->rt6i_dst.addr),
5114 rt->rt6i_dst.plen,
5115 MLXSW_SP_L3_PROTO_IPV6);
5116 if (IS_ERR(fib_node))
5117 return PTR_ERR(fib_node);
5118
5119 /* Before creating a new entry, try to append route to an existing
5120 * multipath entry.
5121 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005122 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005123 if (fib6_entry) {
5124 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5125 if (err)
5126 goto err_fib6_entry_nexthop_add;
5127 return 0;
5128 }
5129
5130 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5131 if (IS_ERR(fib6_entry)) {
5132 err = PTR_ERR(fib6_entry);
5133 goto err_fib6_entry_create;
5134 }
5135
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005136 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005137 if (err)
5138 goto err_fib6_node_entry_link;
5139
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005140 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5141
Ido Schimmel428b8512017-08-03 13:28:28 +02005142 return 0;
5143
5144err_fib6_node_entry_link:
5145 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5146err_fib6_entry_create:
5147err_fib6_entry_nexthop_add:
5148 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5149 return err;
5150}
5151
5152static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5153 struct rt6_info *rt)
5154{
5155 struct mlxsw_sp_fib6_entry *fib6_entry;
5156 struct mlxsw_sp_fib_node *fib_node;
5157
5158 if (mlxsw_sp->router->aborted)
5159 return;
5160
5161 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5162 return;
5163
5164 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5165 if (WARN_ON(!fib6_entry))
5166 return;
5167
5168 /* If route is part of a multipath entry, but not the last one
5169 * removed, then only reduce its nexthop group.
5170 */
5171 if (!list_is_singular(&fib6_entry->rt6_list)) {
5172 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5173 return;
5174 }
5175
5176 fib_node = fib6_entry->common.fib_node;
5177
5178 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5179 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5180 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5181}
5182
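/* In the abort state the device should simply trap routed packets to
 * the CPU: bind all virtual routers of the given protocol to a
 * dedicated LPM tree and install a default route whose action is ip2me.
 */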
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005183static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5184 enum mlxsw_reg_ralxx_protocol proto,
5185 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005186{
5187 char ralta_pl[MLXSW_REG_RALTA_LEN];
5188 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005189 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005190
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005191 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005192 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5193 if (err)
5194 return err;
5195
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005196 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005197 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5198 if (err)
5199 return err;
5200
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005201 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005202 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005203 char raltb_pl[MLXSW_REG_RALTB_LEN];
5204 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005205
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005206 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005207 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5208 raltb_pl);
5209 if (err)
5210 return err;
5211
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005212 mlxsw_reg_ralue_pack(ralue_pl, proto,
5213 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005214 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5215 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5216 ralue_pl);
5217 if (err)
5218 return err;
5219 }
5220
5221 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005222}
5223
Yotam Gigid42b0962017-09-27 08:23:20 +02005224static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5225 struct mfc_entry_notifier_info *men_info,
5226 bool replace)
5227{
5228 struct mlxsw_sp_vr *vr;
5229
5230 if (mlxsw_sp->router->aborted)
5231 return 0;
5232
David Ahernf8fa9b42017-10-18 09:56:56 -07005233 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005234 if (IS_ERR(vr))
5235 return PTR_ERR(vr);
5236
5237 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5238}
5239
5240static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5241 struct mfc_entry_notifier_info *men_info)
5242{
5243 struct mlxsw_sp_vr *vr;
5244
5245 if (mlxsw_sp->router->aborted)
5246 return;
5247
5248 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5249 if (WARN_ON(!vr))
5250 return;
5251
5252 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5253 mlxsw_sp_vr_put(vr);
5254}
5255
5256static int
5257mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5258 struct vif_entry_notifier_info *ven_info)
5259{
5260 struct mlxsw_sp_rif *rif;
5261 struct mlxsw_sp_vr *vr;
5262
5263 if (mlxsw_sp->router->aborted)
5264 return 0;
5265
David Ahernf8fa9b42017-10-18 09:56:56 -07005266 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005267 if (IS_ERR(vr))
5268 return PTR_ERR(vr);
5269
5270 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5271 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5272 ven_info->vif_index,
5273 ven_info->vif_flags, rif);
5274}
5275
5276static void
5277mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5278 struct vif_entry_notifier_info *ven_info)
5279{
5280 struct mlxsw_sp_vr *vr;
5281
5282 if (mlxsw_sp->router->aborted)
5283 return;
5284
5285 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5286 if (WARN_ON(!vr))
5287 return;
5288
5289 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5290 mlxsw_sp_vr_put(vr);
5291}
5292
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005293static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5294{
5295 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5296 int err;
5297
5298 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5299 MLXSW_SP_LPM_TREE_MIN);
5300 if (err)
5301 return err;
5302
Yotam Gigid42b0962017-09-27 08:23:20 +02005303 /* The multicast router code does not need an abort trap as by default,
5304 * packets that don't match any routes are trapped to the CPU.
5305 */
5306
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005307 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5308 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5309 MLXSW_SP_LPM_TREE_MIN + 1);
5310}
5311
Ido Schimmel9aecce12017-02-09 10:28:42 +01005312static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5313 struct mlxsw_sp_fib_node *fib_node)
5314{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005315 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005316
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005317 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5318 common.list) {
5319 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005320
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005321 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5322 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005323 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005324 /* Break when entry list is empty and node was freed.
5325 * Otherwise, we'll access freed memory in the next
5326 * iteration.
5327 */
5328 if (do_break)
5329 break;
5330 }
5331}
5332
Ido Schimmel428b8512017-08-03 13:28:28 +02005333static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5334 struct mlxsw_sp_fib_node *fib_node)
5335{
5336 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5337
5338 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5339 common.list) {
5340 bool do_break = &tmp->common.list == &fib_node->entry_list;
5341
5342 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5343 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5344 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5345 if (do_break)
5346 break;
5347 }
5348}
5349
Ido Schimmel9aecce12017-02-09 10:28:42 +01005350static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5351 struct mlxsw_sp_fib_node *fib_node)
5352{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005353 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005354 case MLXSW_SP_L3_PROTO_IPV4:
5355 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5356 break;
5357 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005358 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005359 break;
5360 }
5361}
5362
Ido Schimmel76610eb2017-03-10 08:53:41 +01005363static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5364 struct mlxsw_sp_vr *vr,
5365 enum mlxsw_sp_l3proto proto)
5366{
5367 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5368 struct mlxsw_sp_fib_node *fib_node, *tmp;
5369
5370 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5371 bool do_break = &tmp->list == &fib->node_list;
5372
5373 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5374 if (do_break)
5375 break;
5376 }
5377}
5378
Ido Schimmelac571de2016-11-14 11:26:32 +01005379static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005380{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005381 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005382
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005383 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005384 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005385
Ido Schimmel76610eb2017-03-10 08:53:41 +01005386 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005387 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005388
5389 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005390 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005391
5392 /* If virtual router was only used for IPv4, then it's no
5393 * longer used.
5394 */
5395 if (!mlxsw_sp_vr_is_used(vr))
5396 continue;
5397 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005398 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005399}
5400
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005401static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005402{
5403 int err;
5404
Ido Schimmel9011b672017-05-16 19:38:25 +02005405 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005406 return;
5407 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005408 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005409 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005410 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5411 if (err)
5412 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5413}
5414
Ido Schimmel30572242016-12-03 16:45:01 +01005415struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005416 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005417 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005418 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005419 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005420 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005421 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005422 struct mfc_entry_notifier_info men_info;
5423 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005424 };
Ido Schimmel30572242016-12-03 16:45:01 +01005425 struct mlxsw_sp *mlxsw_sp;
5426 unsigned long event;
5427};
5428
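/* FIB events are handled in deferred work, since the notifier can be
 * called in an atomic context while device updates require sleeping.
 * References taken by the notifier are released here, under RTNL.
 */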
Ido Schimmel66a57632017-08-03 13:28:26 +02005429static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005430{
Ido Schimmel30572242016-12-03 16:45:01 +01005431 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005432 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005433 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005434 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005435 int err;
5436
Ido Schimmel30572242016-12-03 16:45:01 +01005437 /* Protect internal structures from changes */
5438 rtnl_lock();
5439 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005440 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005441 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005442 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005443 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005444 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5445 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005446 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005447 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005448 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005449 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005450 break;
5451 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005452 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5453 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005454 break;
David Ahern1f279232017-10-27 17:37:14 -07005455 case FIB_EVENT_RULE_ADD:
5456		/* An unsupported FIB rule was added; the device can no longer
5457		 * mirror the kernel's routing decisions, so abort the offload.
5458		 */
5459 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005460 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005461 case FIB_EVENT_NH_ADD: /* fall through */
5462 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005463 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5464 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005465 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5466 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005467 }
Ido Schimmel30572242016-12-03 16:45:01 +01005468 rtnl_unlock();
5469 kfree(fib_work);
5470}
5471
Ido Schimmel66a57632017-08-03 13:28:26 +02005472static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5473{
Ido Schimmel583419f2017-08-03 13:28:27 +02005474 struct mlxsw_sp_fib_event_work *fib_work =
5475 container_of(work, struct mlxsw_sp_fib_event_work, work);
5476 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005477 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005478 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005479
5480 rtnl_lock();
5481 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005482 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005483 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005484 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005485 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005486 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005487 if (err)
5488 mlxsw_sp_router_fib_abort(mlxsw_sp);
5489 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5490 break;
5491 case FIB_EVENT_ENTRY_DEL:
5492 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5493 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5494 break;
David Ahern1f279232017-10-27 17:37:14 -07005495 case FIB_EVENT_RULE_ADD:
5496		/* An unsupported FIB rule was added; the device can no longer
5497		 * mirror the kernel's routing decisions, so abort the offload.
5498		 */
5499 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005500 break;
5501 }
5502 rtnl_unlock();
5503 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005504}
5505
Yotam Gigid42b0962017-09-27 08:23:20 +02005506static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5507{
5508 struct mlxsw_sp_fib_event_work *fib_work =
5509 container_of(work, struct mlxsw_sp_fib_event_work, work);
5510 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005511 bool replace;
5512 int err;
5513
5514 rtnl_lock();
5515 switch (fib_work->event) {
5516 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5517 case FIB_EVENT_ENTRY_ADD:
5518 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5519
5520 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5521 replace);
5522 if (err)
5523 mlxsw_sp_router_fib_abort(mlxsw_sp);
5524 ipmr_cache_put(fib_work->men_info.mfc);
5525 break;
5526 case FIB_EVENT_ENTRY_DEL:
5527 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5528 ipmr_cache_put(fib_work->men_info.mfc);
5529 break;
5530 case FIB_EVENT_VIF_ADD:
5531 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5532 &fib_work->ven_info);
5533 if (err)
5534 mlxsw_sp_router_fib_abort(mlxsw_sp);
5535 dev_put(fib_work->ven_info.dev);
5536 break;
5537 case FIB_EVENT_VIF_DEL:
5538 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5539 &fib_work->ven_info);
5540 dev_put(fib_work->ven_info.dev);
5541 break;
David Ahern1f279232017-10-27 17:37:14 -07005542 case FIB_EVENT_RULE_ADD:
5543 /* If we get here, a rule was added that we do not support.
5544 * Just do the fib_abort.
5545 */
5546 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005547 break;
5548 }
5549 rtnl_unlock();
5550 kfree(fib_work);
5551}
5552
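/* The FIB notifier runs in an atomic context, so the handlers below only
 * snapshot the notifier info into the work item and take the references
 * needed to keep it alive until the work item is processed.
 */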
Ido Schimmel66a57632017-08-03 13:28:26 +02005553static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5554 struct fib_notifier_info *info)
5555{
David Ahern3c75f9b2017-10-18 15:01:38 -07005556 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005557 struct fib_nh_notifier_info *fnh_info;
5558
Ido Schimmel66a57632017-08-03 13:28:26 +02005559 switch (fib_work->event) {
5560 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5561 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5562 case FIB_EVENT_ENTRY_ADD: /* fall through */
5563 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005564 fen_info = container_of(info, struct fib_entry_notifier_info,
5565 info);
5566 fib_work->fen_info = *fen_info;
5567 /* Take a reference on the fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005568 * freed while the work is queued. Release it afterwards.
5569 */
5570 fib_info_hold(fib_work->fen_info.fi);
5571 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005572 case FIB_EVENT_NH_ADD: /* fall through */
5573 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005574 fnh_info = container_of(info, struct fib_nh_notifier_info,
5575 info);
5576 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005577 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5578 break;
5579 }
5580}
5581
5582static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5583 struct fib_notifier_info *info)
5584{
David Ahern3c75f9b2017-10-18 15:01:38 -07005585 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005586
Ido Schimmel583419f2017-08-03 13:28:27 +02005587 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005588 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005589 case FIB_EVENT_ENTRY_ADD: /* fall through */
5590 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005591 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5592 info);
5593 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005594 rt6_hold(fib_work->fen6_info.rt);
5595 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005596 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005597}
5598
Yotam Gigid42b0962017-09-27 08:23:20 +02005599static void
5600mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5601 struct fib_notifier_info *info)
5602{
5603 switch (fib_work->event) {
5604 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5605 case FIB_EVENT_ENTRY_ADD: /* fall through */
5606 case FIB_EVENT_ENTRY_DEL:
5607 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5608 ipmr_cache_hold(fib_work->men_info.mfc);
5609 break;
5610 case FIB_EVENT_VIF_ADD: /* fall through */
5611 case FIB_EVENT_VIF_DEL:
5612 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5613 dev_hold(fib_work->ven_info.dev);
5614 break;
David Ahern1f279232017-10-27 17:37:14 -07005615 }
5616}
5617
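/* Check whether a FIB rule is one that can be reflected to the device.
 * Only the default rules and l3mdev (VRF) rules are supported; any other
 * rule makes the caller abort FIB offload altogether.
 */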
5618static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5619 struct fib_notifier_info *info,
5620 struct mlxsw_sp *mlxsw_sp)
5621{
5622 struct netlink_ext_ack *extack = info->extack;
5623 struct fib_rule_notifier_info *fr_info;
5624 struct fib_rule *rule;
5625 int err = 0;
5626
5627 /* nothing to do at the moment */
5628 if (event == FIB_EVENT_RULE_DEL)
5629 return 0;
5630
5631 if (mlxsw_sp->router->aborted)
5632 return 0;
5633
5634 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5635 rule = fr_info->rule;
5636
5637 switch (info->family) {
5638 case AF_INET:
5639 if (!fib4_rule_default(rule) && !rule->l3mdev)
5640 err = -1;
5641 break;
5642 case AF_INET6:
5643 if (!fib6_rule_default(rule) && !rule->l3mdev)
5644 err = -1;
5645 break;
5646 case RTNL_FAMILY_IPMR:
5647 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5648 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005649 break;
5650 }
David Ahern1f279232017-10-27 17:37:14 -07005651
5652 if (err < 0)
5653 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5654
5655 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005656}
5657
Ido Schimmel30572242016-12-03 16:45:01 +01005658/* Called with rcu_read_lock() */
5659static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5660 unsigned long event, void *ptr)
5661{
Ido Schimmel30572242016-12-03 16:45:01 +01005662 struct mlxsw_sp_fib_event_work *fib_work;
5663 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005664 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005665 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005666
Ido Schimmel8e29f972017-09-15 15:31:07 +02005667 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005668 (info->family != AF_INET && info->family != AF_INET6 &&
5669 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005670 return NOTIFY_DONE;
5671
David Ahern1f279232017-10-27 17:37:14 -07005672 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5673
5674 switch (event) {
5675 case FIB_EVENT_RULE_ADD: /* fall through */
5676 case FIB_EVENT_RULE_DEL:
5677 err = mlxsw_sp_router_fib_rule_event(event, info,
5678 router->mlxsw_sp);
5679 if (!err)
5680 return NOTIFY_DONE;
5681 }
5682
Ido Schimmel30572242016-12-03 16:45:01 +01005683 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5684 if (WARN_ON(!fib_work))
5685 return NOTIFY_BAD;
5686
Ido Schimmel7e39d112017-05-16 19:38:28 +02005687 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005688 fib_work->event = event;
5689
Ido Schimmel66a57632017-08-03 13:28:26 +02005690 switch (info->family) {
5691 case AF_INET:
5692 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5693 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005694 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005695 case AF_INET6:
5696 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5697 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005698 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005699 case RTNL_FAMILY_IPMR:
5700 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5701 mlxsw_sp_router_fibmr_event(fib_work, info);
5702 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005703 }
5704
Ido Schimmela0e47612017-02-06 16:20:10 +01005705 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005706
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005707 return NOTIFY_DONE;
5708}
5709
Ido Schimmel4724ba562017-03-10 08:53:39 +01005710static struct mlxsw_sp_rif *
5711mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5712 const struct net_device *dev)
5713{
5714 int i;
5715
5716 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005717 if (mlxsw_sp->router->rifs[i] &&
5718 mlxsw_sp->router->rifs[i]->dev == dev)
5719 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005720
5721 return NULL;
5722}
5723
5724static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5725{
5726 char ritr_pl[MLXSW_REG_RITR_LEN];
5727 int err;
5728
5729 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5730 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5731 if (WARN_ON_ONCE(err))
5732 return err;
5733
5734 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5735 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5736}
5737
5738static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005739 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005740{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005741 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5742 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5743 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005744}
5745
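/* Decide whether an address event should trigger RIF configuration:
 * NETDEV_UP creates a RIF only if the netdev does not already have one,
 * while NETDEV_DOWN destroys the RIF only once the netdev has neither
 * IPv4 nor IPv6 addresses left and is not an l3mdev slave.
 */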
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005746static bool
5747mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5748 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005749{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005750 struct inet6_dev *inet6_dev;
5751 bool addr_list_empty = true;
5752 struct in_device *idev;
5753
Ido Schimmel4724ba562017-03-10 08:53:39 +01005754 switch (event) {
5755 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005756 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005757 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005758 idev = __in_dev_get_rtnl(dev);
5759 if (idev && idev->ifa_list)
5760 addr_list_empty = false;
5761
5762 inet6_dev = __in6_dev_get(dev);
5763 if (addr_list_empty && inet6_dev &&
5764 !list_empty(&inet6_dev->addr_list))
5765 addr_list_empty = false;
5766
5767 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005768 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005769 return true;
5770 /* It is possible we already removed the RIF ourselves
5771 * if it was assigned to a netdev that is now a bridge
5772 * or LAG slave.
5773 */
5774 return false;
5775 }
5776
5777 return false;
5778}
5779
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005780static enum mlxsw_sp_rif_type
5781mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5782 const struct net_device *dev)
5783{
5784 enum mlxsw_sp_fid_type type;
5785
Petr Machata6ddb7422017-09-02 23:49:19 +02005786 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5787 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5788
5789 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005790 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5791 type = MLXSW_SP_FID_TYPE_8021Q;
5792 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5793 type = MLXSW_SP_FID_TYPE_8021Q;
5794 else if (netif_is_bridge_master(dev))
5795 type = MLXSW_SP_FID_TYPE_8021D;
5796 else
5797 type = MLXSW_SP_FID_TYPE_RFID;
5798
5799 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5800}
5801
Ido Schimmelde5ed992017-06-04 16:53:40 +02005802static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005803{
5804 int i;
5805
Ido Schimmelde5ed992017-06-04 16:53:40 +02005806 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5807 if (!mlxsw_sp->router->rifs[i]) {
5808 *p_rif_index = i;
5809 return 0;
5810 }
5811 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005812
Ido Schimmelde5ed992017-06-04 16:53:40 +02005813 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005814}
5815
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005816static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5817 u16 vr_id,
5818 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005819{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005820 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005821
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005822 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005823 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005824 return NULL;
5825
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005826 INIT_LIST_HEAD(&rif->nexthop_list);
5827 INIT_LIST_HEAD(&rif->neigh_list);
5828 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5829 rif->mtu = l3_dev->mtu;
5830 rif->vr_id = vr_id;
5831 rif->dev = l3_dev;
5832 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005833
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005834 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005835}
5836
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005837struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5838 u16 rif_index)
5839{
5840 return mlxsw_sp->router->rifs[rif_index];
5841}
5842
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005843u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5844{
5845 return rif->rif_index;
5846}
5847
Petr Machata92107cf2017-09-02 23:49:28 +02005848u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5849{
5850 return lb_rif->common.rif_index;
5851}
5852
5853u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5854{
5855 return lb_rif->ul_vr_id;
5856}
5857
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005858int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5859{
5860 return rif->dev->ifindex;
5861}
5862
Yotam Gigi91e4d592017-09-19 10:00:19 +02005863const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5864{
5865 return rif->dev;
5866}
5867
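/* Create a router interface (RIF) for a netdev: bind it to a virtual
 * router, allocate a free RIF index, get the backing FID (if the RIF type
 * uses one), let the type-specific ops configure the device and register
 * the RIF with the VR's multicast routing table.
 */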
Ido Schimmel4724ba562017-03-10 08:53:39 +01005868static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005869mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005870 const struct mlxsw_sp_rif_params *params,
5871 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005872{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005873 u32 tb_id = l3mdev_fib_table(params->dev);
5874 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005875 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005876 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005877 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005878 struct mlxsw_sp_vr *vr;
5879 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005880 int err;
5881
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005882 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5883 ops = mlxsw_sp->router->rif_ops_arr[type];
5884
David Ahernf8fa9b42017-10-18 09:56:56 -07005885 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005886 if (IS_ERR(vr))
5887 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005888 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005889
Ido Schimmelde5ed992017-06-04 16:53:40 +02005890 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005891 if (err) {
5892 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005893 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005894 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005895
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005896 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005897 if (!rif) {
5898 err = -ENOMEM;
5899 goto err_rif_alloc;
5900 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005901 rif->mlxsw_sp = mlxsw_sp;
5902 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005903
Petr Machata010cadf2017-09-02 23:49:18 +02005904 if (ops->fid_get) {
5905 fid = ops->fid_get(rif);
5906 if (IS_ERR(fid)) {
5907 err = PTR_ERR(fid);
5908 goto err_fid_get;
5909 }
5910 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005911 }
5912
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005913 if (ops->setup)
5914 ops->setup(rif, params);
5915
5916 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005917 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005918 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005919
Yotam Gigid42b0962017-09-27 08:23:20 +02005920 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5921 if (err)
5922 goto err_mr_rif_add;
5923
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005924 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005925 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005926
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005927 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005928
Yotam Gigid42b0962017-09-27 08:23:20 +02005929err_mr_rif_add:
5930 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005931err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005932 if (fid)
5933 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005934err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005935 kfree(rif);
5936err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005937err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005938 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005939 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005940 return ERR_PTR(err);
5941}
5942
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005943void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005944{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005945 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5946 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005947 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005948 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005949
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005950 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005951 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005952
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005953 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005954 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005955 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005956 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005957 if (fid)
5958 /* Loopback RIFs are not associated with a FID. */
5959 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005960 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005961 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005962 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005963}
5964
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005965static void
5966mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5967 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5968{
5969 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5970
5971 params->vid = mlxsw_sp_port_vlan->vid;
5972 params->lag = mlxsw_sp_port->lagged;
5973 if (params->lag)
5974 params->lag_id = mlxsw_sp_port->lag_id;
5975 else
5976 params->system_port = mlxsw_sp_port->local_port;
5977}
5978
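/* Make a {port, VID} a router port: ensure a sub-port RIF exists for the
 * L3 netdev, map the {port, VID} to the RIF's FID and put the VID in a
 * forwarding, non-learning state.
 */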
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005979static int
Ido Schimmela1107482017-05-26 08:37:39 +02005980mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005981 struct net_device *l3_dev,
5982 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005983{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005984 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005985 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005986 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005987 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005988 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005989 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005990
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005991 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005992 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005993 struct mlxsw_sp_rif_params params = {
5994 .dev = l3_dev,
5995 };
5996
5997 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005998 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005999 if (IS_ERR(rif))
6000 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006001 }
6002
Ido Schimmela1107482017-05-26 08:37:39 +02006003 /* The FID was already created; just take a reference. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006004 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006005 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6006 if (err)
6007 goto err_fid_port_vid_map;
6008
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006009 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006010 if (err)
6011 goto err_port_vid_learning_set;
6012
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006013 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006014 BR_STATE_FORWARDING);
6015 if (err)
6016 goto err_port_vid_stp_set;
6017
Ido Schimmela1107482017-05-26 08:37:39 +02006018 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006019
Ido Schimmel4724ba562017-03-10 08:53:39 +01006020 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006021
6022err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006023 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006024err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006025 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6026err_fid_port_vid_map:
6027 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006028 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006029}
6030
Ido Schimmela1107482017-05-26 08:37:39 +02006031void
6032mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006033{
Ido Schimmelce95e152017-05-26 08:37:27 +02006034 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006035 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006036 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006037
Ido Schimmela1107482017-05-26 08:37:39 +02006038 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6039 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006040
Ido Schimmela1107482017-05-26 08:37:39 +02006041 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006042 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6043 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006044 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6045 /* If router port holds the last reference on the rFID, then the
6046 * associated Sub-port RIF will be destroyed.
6047 */
6048 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006049}
6050
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006051static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6052 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006053 unsigned long event, u16 vid,
6054 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006055{
6056 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006057 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006058
Ido Schimmelce95e152017-05-26 08:37:27 +02006059 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006060 if (WARN_ON(!mlxsw_sp_port_vlan))
6061 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006062
6063 switch (event) {
6064 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006065 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006066 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006067 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006068 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006069 break;
6070 }
6071
6072 return 0;
6073}
6074
6075static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006076 unsigned long event,
6077 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006078{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006079 if (netif_is_bridge_port(port_dev) ||
6080 netif_is_lag_port(port_dev) ||
6081 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006082 return 0;
6083
David Ahernf8fa9b42017-10-18 09:56:56 -07006084 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6085 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006086}
6087
6088static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6089 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006090 unsigned long event, u16 vid,
6091 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006092{
6093 struct net_device *port_dev;
6094 struct list_head *iter;
6095 int err;
6096
6097 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6098 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006099 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6100 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006101 event, vid,
6102 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006103 if (err)
6104 return err;
6105 }
6106 }
6107
6108 return 0;
6109}
6110
6111static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006112 unsigned long event,
6113 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006114{
6115 if (netif_is_bridge_port(lag_dev))
6116 return 0;
6117
David Ahernf8fa9b42017-10-18 09:56:56 -07006118 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6119 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006120}
6121
Ido Schimmel4724ba562017-03-10 08:53:39 +01006122static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006123 unsigned long event,
6124 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006125{
6126 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006127 struct mlxsw_sp_rif_params params = {
6128 .dev = l3_dev,
6129 };
Ido Schimmela1107482017-05-26 08:37:39 +02006130 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006131
6132 switch (event) {
6133 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006134 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006135 if (IS_ERR(rif))
6136 return PTR_ERR(rif);
6137 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006138 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006139 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006140 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006141 break;
6142 }
6143
6144 return 0;
6145}
6146
6147static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006148 unsigned long event,
6149 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006150{
6151 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006152 u16 vid = vlan_dev_vlan_id(vlan_dev);
6153
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006154 if (netif_is_bridge_port(vlan_dev))
6155 return 0;
6156
Ido Schimmel4724ba562017-03-10 08:53:39 +01006157 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006158 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006159 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006160 else if (netif_is_lag_master(real_dev))
6161 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006162 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006163 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006164 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006165
6166 return 0;
6167}
6168
Ido Schimmelb1e45522017-04-30 19:47:14 +03006169static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006170 unsigned long event,
6171 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006172{
6173 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006174 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006175 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006176 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006177 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006178 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006179 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006180 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006181 else
6182 return 0;
6183}
6184
Ido Schimmel4724ba562017-03-10 08:53:39 +01006185int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6186 unsigned long event, void *ptr)
6187{
6188 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6189 struct net_device *dev = ifa->ifa_dev->dev;
6190 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006191 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006192 int err = 0;
6193
David Ahern89d5dd22017-10-18 09:56:55 -07006194 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6195 if (event == NETDEV_UP)
6196 goto out;
6197
6198 mlxsw_sp = mlxsw_sp_lower_get(dev);
6199 if (!mlxsw_sp)
6200 goto out;
6201
6202 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6203 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6204 goto out;
6205
David Ahernf8fa9b42017-10-18 09:56:56 -07006206 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006207out:
6208 return notifier_from_errno(err);
6209}
6210
6211int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6212 unsigned long event, void *ptr)
6213{
6214 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6215 struct net_device *dev = ivi->ivi_dev->dev;
6216 struct mlxsw_sp *mlxsw_sp;
6217 struct mlxsw_sp_rif *rif;
6218 int err = 0;
6219
Ido Schimmel4724ba562017-03-10 08:53:39 +01006220 mlxsw_sp = mlxsw_sp_lower_get(dev);
6221 if (!mlxsw_sp)
6222 goto out;
6223
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006224 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006225 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006226 goto out;
6227
David Ahernf8fa9b42017-10-18 09:56:56 -07006228 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006229out:
6230 return notifier_from_errno(err);
6231}
6232
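/* The inet6addr notifier is called in an atomic context, so IPv6 address
 * events are deferred to a work item that can take RTNL and configure the
 * RIF, unlike the inetaddr (IPv4) notifier which is invoked under RTNL.
 */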
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006233struct mlxsw_sp_inet6addr_event_work {
6234 struct work_struct work;
6235 struct net_device *dev;
6236 unsigned long event;
6237};
6238
6239static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6240{
6241 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6242 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6243 struct net_device *dev = inet6addr_work->dev;
6244 unsigned long event = inet6addr_work->event;
6245 struct mlxsw_sp *mlxsw_sp;
6246 struct mlxsw_sp_rif *rif;
6247
6248 rtnl_lock();
6249 mlxsw_sp = mlxsw_sp_lower_get(dev);
6250 if (!mlxsw_sp)
6251 goto out;
6252
6253 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6254 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6255 goto out;
6256
David Ahernf8fa9b42017-10-18 09:56:56 -07006257 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006258out:
6259 rtnl_unlock();
6260 dev_put(dev);
6261 kfree(inet6addr_work);
6262}
6263
6264/* Called with rcu_read_lock() */
6265int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6266 unsigned long event, void *ptr)
6267{
6268 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6269 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6270 struct net_device *dev = if6->idev->dev;
6271
David Ahern89d5dd22017-10-18 09:56:55 -07006272 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6273 if (event == NETDEV_UP)
6274 return NOTIFY_DONE;
6275
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006276 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6277 return NOTIFY_DONE;
6278
6279 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6280 if (!inet6addr_work)
6281 return NOTIFY_BAD;
6282
6283 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6284 inet6addr_work->dev = dev;
6285 inet6addr_work->event = event;
6286 dev_hold(dev);
6287 mlxsw_core_schedule_work(&inet6addr_work->work);
6288
6289 return NOTIFY_DONE;
6290}
6291
David Ahern89d5dd22017-10-18 09:56:55 -07006292int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6293 unsigned long event, void *ptr)
6294{
6295 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6296 struct net_device *dev = i6vi->i6vi_dev->dev;
6297 struct mlxsw_sp *mlxsw_sp;
6298 struct mlxsw_sp_rif *rif;
6299 int err = 0;
6300
6301 mlxsw_sp = mlxsw_sp_lower_get(dev);
6302 if (!mlxsw_sp)
6303 goto out;
6304
6305 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6306 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6307 goto out;
6308
David Ahernf8fa9b42017-10-18 09:56:56 -07006309 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006310out:
6311 return notifier_from_errno(err);
6312}
6313
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006314static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006315 const char *mac, int mtu)
6316{
6317 char ritr_pl[MLXSW_REG_RITR_LEN];
6318 int err;
6319
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006320 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006321 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6322 if (err)
6323 return err;
6324
6325 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6326 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6327 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6328 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6329}
6330
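/* React to a MAC or MTU change on a netdev that has a RIF: replace the
 * FDB entry for the router MAC, update the RITR entry and, if the MTU
 * changed, propagate it to the multicast routing table.
 */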
6331int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6332{
6333 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006334 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006335 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006336 int err;
6337
6338 mlxsw_sp = mlxsw_sp_lower_get(dev);
6339 if (!mlxsw_sp)
6340 return 0;
6341
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006342 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6343 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006344 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006345 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006346
Ido Schimmela1107482017-05-26 08:37:39 +02006347 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006348 if (err)
6349 return err;
6350
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006351 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6352 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006353 if (err)
6354 goto err_rif_edit;
6355
Ido Schimmela1107482017-05-26 08:37:39 +02006356 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006357 if (err)
6358 goto err_rif_fdb_op;
6359
Yotam Gigifd890fe2017-09-27 08:23:21 +02006360 if (rif->mtu != dev->mtu) {
6361 struct mlxsw_sp_vr *vr;
6362
6363 /* The RIF is relevant only to its mr_table instance, as unlike
6364 * unicast routing, in multicast routing a RIF cannot be shared
6365 * between several multicast routing tables.
6366 */
6367 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6368 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6369 }
6370
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006371 ether_addr_copy(rif->addr, dev->dev_addr);
6372 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006373
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006374 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006375
6376 return 0;
6377
6378err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006379 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006380err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006381 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006382 return err;
6383}
6384
Ido Schimmelb1e45522017-04-30 19:47:14 +03006385static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006386 struct net_device *l3_dev,
6387 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006388{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006389 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006390
Ido Schimmelb1e45522017-04-30 19:47:14 +03006391 /* If netdev is already associated with a RIF, then we need to
6392 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006393 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006394 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6395 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006396 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006397
David Ahernf8fa9b42017-10-18 09:56:56 -07006398 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006399}
6400
Ido Schimmelb1e45522017-04-30 19:47:14 +03006401static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6402 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006403{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006404 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006405
Ido Schimmelb1e45522017-04-30 19:47:14 +03006406 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6407 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006408 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006409 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006410}
6411
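/* Handle enslavement of an L3 netdev to a VRF and its release: the RIF is
 * torn down and re-created so that it is bound to the correct virtual
 * router.
 */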
Ido Schimmelb1e45522017-04-30 19:47:14 +03006412int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6413 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006414{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006415 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6416 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006417
Ido Schimmelb1e45522017-04-30 19:47:14 +03006418 if (!mlxsw_sp)
6419 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006420
Ido Schimmelb1e45522017-04-30 19:47:14 +03006421 switch (event) {
6422 case NETDEV_PRECHANGEUPPER:
6423 return 0;
6424 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006425 if (info->linking) {
6426 struct netlink_ext_ack *extack;
6427
6428 extack = netdev_notifier_info_to_extack(&info->info);
6429 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6430 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006431 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006432 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006433 break;
6434 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006435
Ido Schimmelb1e45522017-04-30 19:47:14 +03006436 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006437}
6438
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006439static struct mlxsw_sp_rif_subport *
6440mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006441{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006442 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006443}
6444
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006445static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6446 const struct mlxsw_sp_rif_params *params)
6447{
6448 struct mlxsw_sp_rif_subport *rif_subport;
6449
6450 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6451 rif_subport->vid = params->vid;
6452 rif_subport->lag = params->lag;
6453 if (params->lag)
6454 rif_subport->lag_id = params->lag_id;
6455 else
6456 rif_subport->system_port = params->system_port;
6457}
6458
6459static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6460{
6461 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6462 struct mlxsw_sp_rif_subport *rif_subport;
6463 char ritr_pl[MLXSW_REG_RITR_LEN];
6464
6465 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6466 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006467 rif->rif_index, rif->vr_id, rif->dev->mtu);
6468 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006469 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6470 rif_subport->lag ? rif_subport->lag_id :
6471 rif_subport->system_port,
6472 rif_subport->vid);
6473
6474 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6475}
6476
6477static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6478{
Petr Machata010cadf2017-09-02 23:49:18 +02006479 int err;
6480
6481 err = mlxsw_sp_rif_subport_op(rif, true);
6482 if (err)
6483 return err;
6484
6485 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6486 mlxsw_sp_fid_index(rif->fid), true);
6487 if (err)
6488 goto err_rif_fdb_op;
6489
6490 mlxsw_sp_fid_rif_set(rif->fid, rif);
6491 return 0;
6492
6493err_rif_fdb_op:
6494 mlxsw_sp_rif_subport_op(rif, false);
6495 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006496}
6497
6498static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6499{
Petr Machata010cadf2017-09-02 23:49:18 +02006500 struct mlxsw_sp_fid *fid = rif->fid;
6501
6502 mlxsw_sp_fid_rif_set(fid, NULL);
6503 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6504 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006505 mlxsw_sp_rif_subport_op(rif, false);
6506}
6507
6508static struct mlxsw_sp_fid *
6509mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6510{
6511 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6512}
6513
6514static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6515 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6516 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6517 .setup = mlxsw_sp_rif_subport_setup,
6518 .configure = mlxsw_sp_rif_subport_configure,
6519 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6520 .fid_get = mlxsw_sp_rif_subport_fid_get,
6521};
6522
6523static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6524 enum mlxsw_reg_ritr_if_type type,
6525 u16 vid_fid, bool enable)
6526{
6527 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6528 char ritr_pl[MLXSW_REG_RITR_LEN];
6529
6530 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006531 rif->dev->mtu);
6532 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006533 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6534
6535 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6536}
6537
Yotam Gigib35750f2017-10-09 11:15:33 +02006538u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006539{
6540 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6541}
6542
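/* Configure a VLAN RIF: create the RITR entry, flood multicast and
 * broadcast traffic in the FID to the router port and install an FDB
 * entry for the RIF's MAC address in the FID.
 */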
6543static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6544{
6545 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6546 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6547 int err;
6548
6549 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6550 if (err)
6551 return err;
6552
Ido Schimmel0d284812017-07-18 10:10:12 +02006553 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6554 mlxsw_sp_router_port(mlxsw_sp), true);
6555 if (err)
6556 goto err_fid_mc_flood_set;
6557
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006558 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6559 mlxsw_sp_router_port(mlxsw_sp), true);
6560 if (err)
6561 goto err_fid_bc_flood_set;
6562
Petr Machata010cadf2017-09-02 23:49:18 +02006563 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6564 mlxsw_sp_fid_index(rif->fid), true);
6565 if (err)
6566 goto err_rif_fdb_op;
6567
6568 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006569 return 0;
6570
Petr Machata010cadf2017-09-02 23:49:18 +02006571err_rif_fdb_op:
6572 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6573 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006574err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006575 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6576 mlxsw_sp_router_port(mlxsw_sp), false);
6577err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006578 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6579 return err;
6580}
6581
6582static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6583{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006584 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006585 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6586 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006587
Petr Machata010cadf2017-09-02 23:49:18 +02006588 mlxsw_sp_fid_rif_set(fid, NULL);
6589 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6590 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006591 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6592 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006593 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6594 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006595 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6596}
6597
6598static struct mlxsw_sp_fid *
6599mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6600{
6601 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6602
6603 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6604}
6605
6606static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6607 .type = MLXSW_SP_RIF_TYPE_VLAN,
6608 .rif_size = sizeof(struct mlxsw_sp_rif),
6609 .configure = mlxsw_sp_rif_vlan_configure,
6610 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6611 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6612};
6613
6614static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6615{
6616 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6617 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6618 int err;
6619
6620 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6621 true);
6622 if (err)
6623 return err;
6624
Ido Schimmel0d284812017-07-18 10:10:12 +02006625 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6626 mlxsw_sp_router_port(mlxsw_sp), true);
6627 if (err)
6628 goto err_fid_mc_flood_set;
6629
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006630 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6631 mlxsw_sp_router_port(mlxsw_sp), true);
6632 if (err)
6633 goto err_fid_bc_flood_set;
6634
Petr Machata010cadf2017-09-02 23:49:18 +02006635 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6636 mlxsw_sp_fid_index(rif->fid), true);
6637 if (err)
6638 goto err_rif_fdb_op;
6639
6640 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006641 return 0;
6642
Petr Machata010cadf2017-09-02 23:49:18 +02006643err_rif_fdb_op:
6644 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6645 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006646err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006647 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6648 mlxsw_sp_router_port(mlxsw_sp), false);
6649err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006650 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6651 return err;
6652}
6653
6654static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6655{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006656 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006657 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6658 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006659
Petr Machata010cadf2017-09-02 23:49:18 +02006660 mlxsw_sp_fid_rif_set(fid, NULL);
6661 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6662 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006663 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6664 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006665 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6666 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006667 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6668}
6669
6670static struct mlxsw_sp_fid *
6671mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6672{
6673 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6674}
6675
6676static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6677 .type = MLXSW_SP_RIF_TYPE_FID,
6678 .rif_size = sizeof(struct mlxsw_sp_rif),
6679 .configure = mlxsw_sp_rif_fid_configure,
6680 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6681 .fid_get = mlxsw_sp_rif_fid_fid_get,
6682};
6683
Petr Machata6ddb7422017-09-02 23:49:19 +02006684static struct mlxsw_sp_rif_ipip_lb *
6685mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6686{
6687 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6688}
6689
6690static void
6691mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6692 const struct mlxsw_sp_rif_params *params)
6693{
6694 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6695 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6696
6697 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6698 common);
6699 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6700 rif_lb->lb_config = params_lb->lb_config;
6701}
6702
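/* Write the RITR entry for an IPIP loopback RIF. Only an IPv4 underlay is
 * currently supported; an IPv6 underlay returns -EAFNOSUPPORT.
 */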
6703static int
6704mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6705 struct mlxsw_sp_vr *ul_vr, bool enable)
6706{
6707 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6708 struct mlxsw_sp_rif *rif = &lb_rif->common;
6709 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6710 char ritr_pl[MLXSW_REG_RITR_LEN];
6711 u32 saddr4;
6712
6713 switch (lb_cf.ul_protocol) {
6714 case MLXSW_SP_L3_PROTO_IPV4:
6715 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6716 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6717 rif->rif_index, rif->vr_id, rif->dev->mtu);
6718 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6719 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6720 ul_vr->id, saddr4, lb_cf.okey);
6721 break;
6722
6723 case MLXSW_SP_L3_PROTO_IPV6:
6724 return -EAFNOSUPPORT;
6725 }
6726
6727 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6728}
6729
6730static int
6731mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6732{
6733 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6734 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6735 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6736 struct mlxsw_sp_vr *ul_vr;
6737 int err;
6738
David Ahernf8fa9b42017-10-18 09:56:56 -07006739 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006740 if (IS_ERR(ul_vr))
6741 return PTR_ERR(ul_vr);
6742
6743 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6744 if (err)
6745 goto err_loopback_op;
6746
6747 lb_rif->ul_vr_id = ul_vr->id;
6748 ++ul_vr->rif_count;
6749 return 0;
6750
6751err_loopback_op:
6752 mlxsw_sp_vr_put(ul_vr);
6753 return err;
6754}
6755
6756static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6757{
6758 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6759 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6760 struct mlxsw_sp_vr *ul_vr;
6761
6762 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6763 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6764
6765 --ul_vr->rif_count;
6766 mlxsw_sp_vr_put(ul_vr);
6767}
6768
6769static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6770 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6771 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6772 .setup = mlxsw_sp_rif_ipip_lb_setup,
6773 .configure = mlxsw_sp_rif_ipip_lb_configure,
6774 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6775};
6776
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006777static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6778 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6779 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6780 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006781 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006782};
6783
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006784static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6785{
6786 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6787
6788 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6789 sizeof(struct mlxsw_sp_rif *),
6790 GFP_KERNEL);
6791 if (!mlxsw_sp->router->rifs)
6792 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006793
6794 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6795
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006796 return 0;
6797}
6798
6799static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6800{
6801 int i;
6802
6803 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6804 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6805
6806 kfree(mlxsw_sp->router->rifs);
6807}
6808
Petr Machatadcbda282017-10-20 09:16:16 +02006809static int
6810mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6811{
6812 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6813
6814 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6815 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6816}
6817
Petr Machata38ebc0f2017-09-02 23:49:17 +02006818static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6819{
6820 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006821 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006822 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006823}
6824
6825static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6826{
Petr Machata1012b9a2017-09-02 23:49:23 +02006827 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006828}
6829
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006830static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6831{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006832 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006833
6834 /* Flush pending FIB notifications and then flush the device's
6835 * table before requesting another dump. The FIB notification
6836 * block is unregistered, so no need to take RTNL.
6837 */
6838 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006839 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6840 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006841}
6842
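/* ECMP hashing. When the kernel is built with multipath routing support,
 * configure the device to match: the headers and fields enabled in the
 * RECR2 register determine which packet fields feed the hash used for
 * nexthop selection.
 */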
Ido Schimmelaf658b62017-11-02 17:14:09 +01006843#ifdef CONFIG_IP_ROUTE_MULTIPATH
6844static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
6845{
6846 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
6847}
6848
6849static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
6850{
6851 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
6852}
6853
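/* IPv4 ECMP hash inputs. With the default (L3) setting of the
 * net.ipv4.fib_multipath_hash_policy sysctl only the source and destination
 * addresses are hashed; with the L4 setting the IP protocol and TCP/UDP
 * ports are hashed as well.
 */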
6854static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
6855{
6856 bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
6857
6858 mlxsw_sp_mp_hash_header_set(recr2_pl,
6859 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
6860 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
6861 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
6862 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
6863 if (only_l3)
6864 return;
6865 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
6866 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
6867 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
6868 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
6869}
6870
6871static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
6872{
6873 mlxsw_sp_mp_hash_header_set(recr2_pl,
6874 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
6875 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
6876 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
6877 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
6878 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
6879 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
6880}
6881
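/* Seed the ECMP hash with a random value and program the IPv4 and IPv6
 * hash inputs in a single RECR2 register write.
 */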
6882static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6883{
6884 char recr2_pl[MLXSW_REG_RECR2_LEN];
6885 u32 seed;
6886
6887 get_random_bytes(&seed, sizeof(seed));
6888 mlxsw_reg_recr2_pack(recr2_pl, seed);
6889 mlxsw_sp_mp4_hash_init(recr2_pl);
6890 mlxsw_sp_mp6_hash_init(recr2_pl);
6891
6892 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
6893}
6894#else
6895static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
6896{
6897 return 0;
6898}
6899#endif
6900
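/* Enable IPv4 and IPv6 routing in the device (RGCR register) and cap the
 * number of router interfaces at the MAX_RIFS resource. The corresponding
 * fini routine disables routing again.
 */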
Ido Schimmel4724ba562017-03-10 08:53:39 +01006901static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6902{
6903 char rgcr_pl[MLXSW_REG_RGCR_LEN];
6904 u64 max_rifs;
6905 int err;
6906
6907 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
6908 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006909 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006910
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006911 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006912 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
6913 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
6914 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006915 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006916 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006917}
6918
6919static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
6920{
6921 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01006922
Arkadi Sharshevskye29237e2017-07-18 10:10:09 +02006923 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006924 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006925}
6926
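/* Main entry point of the router module. Sub-blocks are initialized in
 * dependency order: RIFs, IP-in-IP tunnels, the nexthop hash tables, LPM
 * trees, multicast routing, virtual routers, neighbour handling, the
 * netevent notifier, ECMP hashing, and finally the FIB notifier, whose
 * registration starts replaying the kernel's FIB into the device. Errors
 * unwind in reverse order.
 */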
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006927int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
6928{
Ido Schimmel9011b672017-05-16 19:38:25 +02006929 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006930 int err;
6931
Ido Schimmel9011b672017-05-16 19:38:25 +02006932 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
6933 if (!router)
6934 return -ENOMEM;
6935 mlxsw_sp->router = router;
6936 router->mlxsw_sp = mlxsw_sp;
6937
6938 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006939 err = __mlxsw_sp_router_init(mlxsw_sp);
6940 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02006941 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006942
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006943 err = mlxsw_sp_rifs_init(mlxsw_sp);
6944 if (err)
6945 goto err_rifs_init;
6946
Petr Machata38ebc0f2017-09-02 23:49:17 +02006947 err = mlxsw_sp_ipips_init(mlxsw_sp);
6948 if (err)
6949 goto err_ipips_init;
6950
Ido Schimmel9011b672017-05-16 19:38:25 +02006951 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01006952 &mlxsw_sp_nexthop_ht_params);
6953 if (err)
6954 goto err_nexthop_ht_init;
6955
Ido Schimmel9011b672017-05-16 19:38:25 +02006956 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01006957 &mlxsw_sp_nexthop_group_ht_params);
6958 if (err)
6959 goto err_nexthop_group_ht_init;
6960
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02006961 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
Ido Schimmel8494ab02017-03-24 08:02:47 +01006962 err = mlxsw_sp_lpm_init(mlxsw_sp);
6963 if (err)
6964 goto err_lpm_init;
6965
Yotam Gigid42b0962017-09-27 08:23:20 +02006966 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
6967 if (err)
6968 goto err_mr_init;
6969
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006970 err = mlxsw_sp_vrs_init(mlxsw_sp);
6971 if (err)
6972 goto err_vrs_init;
6973
Ido Schimmel8c9583a2016-10-27 15:12:57 +02006974 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006975 if (err)
6976 goto err_neigh_init;
6977
Ido Schimmel48fac882017-11-02 17:14:06 +01006978 mlxsw_sp->router->netevent_nb.notifier_call =
6979 mlxsw_sp_router_netevent_event;
6980 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6981 if (err)
6982 goto err_register_netevent_notifier;
6983
Ido Schimmelaf658b62017-11-02 17:14:09 +01006984 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
6985 if (err)
6986 goto err_mp_hash_init;
6987
Ido Schimmel7e39d112017-05-16 19:38:28 +02006988 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
6989 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006990 mlxsw_sp_router_fib_dump_flush);
6991 if (err)
6992 goto err_register_fib_notifier;
6993
Jiri Pirkob45f64d2016-09-26 12:52:31 +02006994 return 0;
6995
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006996err_register_fib_notifier:
Ido Schimmelaf658b62017-11-02 17:14:09 +01006997err_mp_hash_init:
Ido Schimmel48fac882017-11-02 17:14:06 +01006998 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
6999err_register_netevent_notifier:
Ido Schimmelc3852ef2016-12-03 16:45:07 +01007000 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007001err_neigh_init:
7002 mlxsw_sp_vrs_fini(mlxsw_sp);
7003err_vrs_init:
Yotam Gigid42b0962017-09-27 08:23:20 +02007004 mlxsw_sp_mr_fini(mlxsw_sp);
7005err_mr_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01007006 mlxsw_sp_lpm_fini(mlxsw_sp);
7007err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007008 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01007009err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02007010 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01007011err_nexthop_ht_init:
Petr Machata38ebc0f2017-09-02 23:49:17 +02007012 mlxsw_sp_ipips_fini(mlxsw_sp);
7013err_ipips_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007014 mlxsw_sp_rifs_fini(mlxsw_sp);
7015err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007016 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007017err_router_init:
7018 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007019 return err;
7020}
7021
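/* Tear the router module down in the reverse order of initialization. */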
7022void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
7023{
Ido Schimmel7e39d112017-05-16 19:38:28 +02007024 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Ido Schimmel48fac882017-11-02 17:14:06 +01007025 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007026 mlxsw_sp_neigh_fini(mlxsw_sp);
7027 mlxsw_sp_vrs_fini(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02007028 mlxsw_sp_mr_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01007029 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007030 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
7031 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Petr Machata38ebc0f2017-09-02 23:49:17 +02007032 mlxsw_sp_ipips_fini(mlxsw_sp);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02007033 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007034 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02007035 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02007036}