/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
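
/* Per-ASIC router state: router interfaces (RIFs), virtual routers, neighbour
 * and nexthop tracking, LPM tree management and IP-in-IP tunnel bookkeeping
 * all hang off this structure.
 */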
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}
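
/* Bind or unbind the flow counter at @counter_index to the RIF in the given
 * direction. RITR is queried first so that the existing RIF configuration is
 * preserved and only the counter fields are rewritten.
 */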
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
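
/* Read the good-unicast packet count of the counter bound to @rif in the
 * given direction. The RICNT query uses the NOP opcode, so the counter is
 * not cleared by the read.
 */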
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}
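
/* Rebind every virtual router that currently uses the old LPM tree to the new
 * one. On failure, roll the already rebound virtual routers back to the old
 * tree.
 */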
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}
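
/* Aggregate the prefix usage of all virtual routers in use for the given
 * protocol into @req_prefix_usage.
 */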
static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}
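
/* Return the FIB table of the tunnel's underlay: the L3 master table of the
 * bound (link) device if one is set, otherwise that of the tunnel device
 * itself, falling back to the main table.
 */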
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
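
/* Tie a local (decap) route to an IPIP entry: allocate a KVD linear entry for
 * the tunnel and cross-link the FIB entry with the IPIP entry.
 */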
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ipip_ul_dev =
			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}
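
/* Offload a newly registered tunnel netdevice as an IPIP entry, unless the
 * tunnel cannot be offloaded or its local address conflicts with a tunnel
 * that is already offloaded.
 */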
Petr Machata796ec772017-11-03 10:03:29 +01001296static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1297 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001298{
Petr Machata00635872017-10-16 16:26:37 +02001299 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machataaf641712017-11-03 10:03:40 +01001300 enum mlxsw_sp_l3proto ul_proto;
Petr Machata00635872017-10-16 16:26:37 +02001301 enum mlxsw_sp_ipip_type ipipt;
Petr Machataaf641712017-11-03 10:03:40 +01001302 union mlxsw_sp_l3addr saddr;
1303 u32 ul_tb_id;
Petr Machata00635872017-10-16 16:26:37 +02001304
1305 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
Petr Machatacafdb2a2017-11-03 10:03:30 +01001306 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
Petr Machataaf641712017-11-03 10:03:40 +01001307 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1308 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1309 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1310 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1311 saddr, ul_tb_id,
1312 NULL)) {
1313 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1314 ol_dev);
1315 if (IS_ERR(ipip_entry))
1316 return PTR_ERR(ipip_entry);
1317 }
Petr Machata00635872017-10-16 16:26:37 +02001318 }
1319
1320 return 0;
1321}
1322
Petr Machata796ec772017-11-03 10:03:29 +01001323static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1324 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001325{
1326 struct mlxsw_sp_ipip_entry *ipip_entry;
1327
1328 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1329 if (ipip_entry)
Petr Machata4cccb732017-10-16 16:26:39 +02001330 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001331}
1332
Petr Machata47518ca2017-11-03 10:03:35 +01001333static void
1334mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1335 struct mlxsw_sp_ipip_entry *ipip_entry)
1336{
1337 struct mlxsw_sp_fib_entry *decap_fib_entry;
1338
1339 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1340 if (decap_fib_entry)
1341 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1342 decap_fib_entry);
1343}
1344
Petr Machata6d4de442017-11-03 10:03:34 +01001345static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1346 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001347{
Petr Machata00635872017-10-16 16:26:37 +02001348 struct mlxsw_sp_ipip_entry *ipip_entry;
1349
1350 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machata47518ca2017-11-03 10:03:35 +01001351 if (ipip_entry)
1352 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001353}
1354
Petr Machataa3fe1982017-11-03 10:03:33 +01001355static void
1356mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1357 struct mlxsw_sp_ipip_entry *ipip_entry)
1358{
1359 if (ipip_entry->decap_fib_entry)
1360 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1361}
1362
Petr Machata796ec772017-11-03 10:03:29 +01001363static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1364 struct net_device *ol_dev)
Petr Machata00635872017-10-16 16:26:37 +02001365{
1366 struct mlxsw_sp_ipip_entry *ipip_entry;
1367
1368 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machataa3fe1982017-11-03 10:03:33 +01001369 if (ipip_entry)
1370 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
Petr Machata00635872017-10-16 16:26:37 +02001371}
1372
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001373static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1374 struct mlxsw_sp_rif *rif);
Petr Machata65a61212017-11-03 10:03:37 +01001375static int
1376mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1377 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001378 bool keep_encap,
Petr Machata65a61212017-11-03 10:03:37 +01001379 struct netlink_ext_ack *extack)
Petr Machataf63ce4e2017-10-16 16:26:38 +02001380{
Petr Machata65a61212017-11-03 10:03:37 +01001381 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1382 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001383
Petr Machata65a61212017-11-03 10:03:37 +01001384 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1385 ipip_entry->ipipt,
1386 ipip_entry->ol_dev,
1387 extack);
1388 if (IS_ERR(new_lb_rif))
1389 return PTR_ERR(new_lb_rif);
1390 ipip_entry->ol_lb = new_lb_rif;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001391
1392 if (keep_encap) {
1393 list_splice_init(&old_lb_rif->common.nexthop_list,
1394 &new_lb_rif->common.nexthop_list);
1395 mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
1396 }
1397
Petr Machata65a61212017-11-03 10:03:37 +01001398 mlxsw_sp_rif_destroy(&old_lb_rif->common);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001399
Petr Machata65a61212017-11-03 10:03:37 +01001400 return 0;
1401}
1402
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001403/**
1404 * Update the offload related to an IPIP entry. This always updates decap, and
1405 * in addition to that it also:
1406 * @recreate_loopback: recreates the associated loopback RIF
1407 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
1408 * relevant when recreate_loopback is true.
1409 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
1410 * is only relevant when recreate_loopback is false.
1411 */
Petr Machata65a61212017-11-03 10:03:37 +01001412int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1413 struct mlxsw_sp_ipip_entry *ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001414 bool recreate_loopback,
1415 bool keep_encap,
1416 bool update_nexthops,
Petr Machata65a61212017-11-03 10:03:37 +01001417 struct netlink_ext_ack *extack)
1418{
1419 int err;
1420
1421 /* RIFs can't be edited, so to update loopback, we need to destroy and
1422 * recreate it. That creates a window of opportunity where RALUE and
1423 * RATR registers end up referencing a RIF that's already gone. RATRs
1424 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
Petr Machataf63ce4e2017-10-16 16:26:38 +02001425	 * of RALUE, demote the decap route here and promote it back when done.
1426 */
1427 if (ipip_entry->decap_fib_entry)
1428 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1429
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001430 if (recreate_loopback) {
1431 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1432 keep_encap, extack);
1433 if (err)
1434 return err;
1435 } else if (update_nexthops) {
1436 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1437 &ipip_entry->ol_lb->common);
1438 }
Petr Machataf63ce4e2017-10-16 16:26:38 +02001439
Petr Machata65a61212017-11-03 10:03:37 +01001440 if (ipip_entry->ol_dev->flags & IFF_UP)
1441 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001442
1443 return 0;
1444}
1445
Petr Machata65a61212017-11-03 10:03:37 +01001446static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1447 struct net_device *ol_dev,
1448 struct netlink_ext_ack *extack)
1449{
1450 struct mlxsw_sp_ipip_entry *ipip_entry =
1451 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
Petr Machatacab43d92017-11-28 13:17:12 +01001452 enum mlxsw_sp_l3proto ul_proto;
1453 union mlxsw_sp_l3addr saddr;
1454 u32 ul_tb_id;
Petr Machata65a61212017-11-03 10:03:37 +01001455
1456 if (!ipip_entry)
1457 return 0;
Petr Machatacab43d92017-11-28 13:17:12 +01001458
1459	/* For flat configuration cases, moving the overlay to a different VRF
1460	 * might cause a local address conflict, and the conflicting tunnels
1461	 * need to be demoted.
1462 */
1463 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1464 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1465 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1466 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1467 saddr, ul_tb_id,
1468 ipip_entry)) {
1469 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1470 return 0;
1471 }
1472
Petr Machata65a61212017-11-03 10:03:37 +01001473 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
Petr Machata0c5f1cd2017-11-03 10:03:38 +01001474 true, false, false, extack);
Petr Machata65a61212017-11-03 10:03:37 +01001475}
1476
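/* Moving the underlay device to a different VRF is handled by recreating the
 * loopback RIF while keeping the encap nexthops, which are refreshed in the
 * process.
 */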
Petr Machata61481f22017-11-03 10:03:41 +01001477static int
1478mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1479 struct mlxsw_sp_ipip_entry *ipip_entry,
1480 struct net_device *ul_dev,
1481 struct netlink_ext_ack *extack)
1482{
1483 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1484 true, true, false, extack);
1485}
1486
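/* When the underlay device comes up, only the nexthops need to be refreshed;
 * the current loopback RIF is kept.
 */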
Petr Machata4cf04f32017-11-03 10:03:42 +01001487static int
Petr Machata44b0fff2017-11-03 10:03:44 +01001488mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1489 struct mlxsw_sp_ipip_entry *ipip_entry,
1490 struct net_device *ul_dev)
1491{
1492 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1493 false, false, true, NULL);
1494}
1495
1496static int
1497mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1498 struct mlxsw_sp_ipip_entry *ipip_entry,
1499 struct net_device *ul_dev)
1500{
1501 /* A down underlay device causes encapsulated packets to not be
1502	/* When the underlay device is down, encapsulated packets are not
1503 * touching anything else.
1504 */
1505 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1506 false, false, true, NULL);
1507}
1508
1509static int
Petr Machata4cf04f32017-11-03 10:03:42 +01001510mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1511 struct net_device *ol_dev,
1512 struct netlink_ext_ack *extack)
1513{
1514 const struct mlxsw_sp_ipip_ops *ipip_ops;
1515 struct mlxsw_sp_ipip_entry *ipip_entry;
1516 int err;
1517
1518 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1519 if (!ipip_entry)
1520 /* A change might make a tunnel eligible for offloading, but
1521 * that is currently not implemented. What falls to slow path
1522 * stays there.
1523 */
1524 return 0;
1525
1526 /* A change might make a tunnel not eligible for offloading. */
1527 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1528 ipip_entry->ipipt)) {
1529 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1530 return 0;
1531 }
1532
1533 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1534 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1535 return err;
1536}
1537
Petr Machataaf641712017-11-03 10:03:40 +01001538void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1539 struct mlxsw_sp_ipip_entry *ipip_entry)
1540{
1541 struct net_device *ol_dev = ipip_entry->ol_dev;
1542
1543 if (ol_dev->flags & IFF_UP)
1544 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1545 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1546}
1547
1548/* The configuration where several tunnels have the same local address in the
1549 * same underlay table needs special treatment in the HW. That is currently not
1550 * implemented in the driver. This function finds and demotes the first tunnel
1551 * with a given source address, except the one passed in via the argument
1552 * `except'.
1553 */
1554bool
1555mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1556 enum mlxsw_sp_l3proto ul_proto,
1557 union mlxsw_sp_l3addr saddr,
1558 u32 ul_tb_id,
1559 const struct mlxsw_sp_ipip_entry *except)
1560{
1561 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1562
1563 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1564 ipip_list_node) {
1565 if (ipip_entry != except &&
1566 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1567 ul_tb_id, ipip_entry)) {
1568 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1569 return true;
1570 }
1571 }
1572
1573 return false;
1574}
1575
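/* Demote all tunnels whose underlay device is the given netdevice. Used when
 * an event on the underlay device could not be handled for some tunnel.
 */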
Petr Machata61481f22017-11-03 10:03:41 +01001576static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1577 struct net_device *ul_dev)
1578{
1579 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1580
1581 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1582 ipip_list_node) {
1583 struct net_device *ipip_ul_dev =
1584 __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
1585
1586 if (ipip_ul_dev == ul_dev)
1587 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1588 }
1589}
1590
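/* Dispatch netdevice events on the overlay (tunnel) device to the handlers
 * above.
 */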
Petr Machata7e75af62017-11-03 10:03:36 +01001591int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1592 struct net_device *ol_dev,
1593 unsigned long event,
1594 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001595{
Petr Machata7e75af62017-11-03 10:03:36 +01001596 struct netdev_notifier_changeupper_info *chup;
1597 struct netlink_ext_ack *extack;
1598
Petr Machata00635872017-10-16 16:26:37 +02001599 switch (event) {
1600 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001601 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001602 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001603 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001604 return 0;
1605 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001606 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1607 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001608 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001609 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001610 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001611 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001612 chup = container_of(info, typeof(*chup), info);
1613 extack = info->extack;
1614 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001615 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001616 ol_dev,
1617 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001618 return 0;
Petr Machata4cf04f32017-11-03 10:03:42 +01001619 case NETDEV_CHANGE:
1620 extack = info->extack;
1621 return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1622 ol_dev, extack);
Petr Machata00635872017-10-16 16:26:37 +02001623 }
1624 return 0;
1625}
1626
Petr Machata61481f22017-11-03 10:03:41 +01001627static int
1628__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1629 struct mlxsw_sp_ipip_entry *ipip_entry,
1630 struct net_device *ul_dev,
1631 unsigned long event,
1632 struct netdev_notifier_info *info)
1633{
1634 struct netdev_notifier_changeupper_info *chup;
1635 struct netlink_ext_ack *extack;
1636
1637 switch (event) {
1638 case NETDEV_CHANGEUPPER:
1639 chup = container_of(info, typeof(*chup), info);
1640 extack = info->extack;
1641 if (netif_is_l3_master(chup->upper_dev))
1642 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1643 ipip_entry,
1644 ul_dev,
1645 extack);
1646 break;
Petr Machata44b0fff2017-11-03 10:03:44 +01001647
1648 case NETDEV_UP:
1649 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1650 ul_dev);
1651 case NETDEV_DOWN:
1652 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1653 ipip_entry,
1654 ul_dev);
Petr Machata61481f22017-11-03 10:03:41 +01001655 }
1656 return 0;
1657}
1658
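/* Dispatch an event on an underlay netdevice to every IPIP entry that uses
 * it as underlay; if handling fails, the affected tunnels are demoted.
 */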
1659int
1660mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1661 struct net_device *ul_dev,
1662 unsigned long event,
1663 struct netdev_notifier_info *info)
1664{
1665 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1666 int err;
1667
1668 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1669 ul_dev,
1670 ipip_entry))) {
1671 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1672 ul_dev, event, info);
1673 if (err) {
1674 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1675 ul_dev);
1676 return err;
1677 }
1678 }
1679
1680 return 0;
1681}
1682
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001683struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001684 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001685};
1686
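/* Driver-private representation of a neighbour, linked to the RIF it was
 * learned on and to the nexthops that resolve through it.
 */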
1687struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001688 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001689 struct rhash_head ht_node;
1690 struct mlxsw_sp_neigh_key key;
1691 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001692 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001693 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001694 struct list_head nexthop_list; /* list of nexthops using
1695 * this neigh entry
1696 */
Yotam Gigib2157142016-07-05 11:27:51 +02001697 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001698 unsigned int counter_index;
1699 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001700};
1701
1702static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1703 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1704 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1705 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1706};
1707
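/* Iterate over the neighbour entries tracked on a RIF: pass NULL to get the
 * first entry, or a previous entry to get the next one. Returns NULL when the
 * end of the list is reached.
 */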
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001708struct mlxsw_sp_neigh_entry *
1709mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1710 struct mlxsw_sp_neigh_entry *neigh_entry)
1711{
1712 if (!neigh_entry) {
1713 if (list_empty(&rif->neigh_list))
1714 return NULL;
1715 else
1716 return list_first_entry(&rif->neigh_list,
1717 typeof(*neigh_entry),
1718 rif_list_node);
1719 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001720 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001721 return NULL;
1722 return list_next_entry(neigh_entry, rif_list_node);
1723}
1724
1725int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1726{
1727 return neigh_entry->key.n->tbl->family;
1728}
1729
1730unsigned char *
1731mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1732{
1733 return neigh_entry->ha;
1734}
1735
1736u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1737{
1738 struct neighbour *n;
1739
1740 n = neigh_entry->key.n;
1741 return ntohl(*((__be32 *) n->primary_key));
1742}
1743
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001744struct in6_addr *
1745mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1746{
1747 struct neighbour *n;
1748
1749 n = neigh_entry->key.n;
1750 return (struct in6_addr *) &n->primary_key;
1751}
1752
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001753int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1754 struct mlxsw_sp_neigh_entry *neigh_entry,
1755 u64 *p_counter)
1756{
1757 if (!neigh_entry->counter_valid)
1758 return -EINVAL;
1759
1760 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1761 p_counter, NULL);
1762}
1763
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001764static struct mlxsw_sp_neigh_entry *
1765mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1766 u16 rif)
1767{
1768 struct mlxsw_sp_neigh_entry *neigh_entry;
1769
1770 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1771 if (!neigh_entry)
1772 return NULL;
1773
1774 neigh_entry->key.n = n;
1775 neigh_entry->rif = rif;
1776 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1777
1778 return neigh_entry;
1779}
1780
1781static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1782{
1783 kfree(neigh_entry);
1784}
1785
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001786static int
1787mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1788 struct mlxsw_sp_neigh_entry *neigh_entry)
1789{
Ido Schimmel9011b672017-05-16 19:38:25 +02001790 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001791 &neigh_entry->ht_node,
1792 mlxsw_sp_neigh_ht_params);
1793}
1794
1795static void
1796mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1797 struct mlxsw_sp_neigh_entry *neigh_entry)
1798{
Ido Schimmel9011b672017-05-16 19:38:25 +02001799 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001800 &neigh_entry->ht_node,
1801 mlxsw_sp_neigh_ht_params);
1802}
1803
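/* A neighbour counter is only allocated when counters are enabled for the
 * matching dpipe host table (IPv4 or IPv6).
 */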
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001804static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001805mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1806 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001807{
1808 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001809 const char *table_name;
1810
1811 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1812 case AF_INET:
1813 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1814 break;
1815 case AF_INET6:
1816 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1817 break;
1818 default:
1819 WARN_ON(1);
1820 return false;
1821 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001822
1823 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001824 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001825}
1826
1827static void
1828mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1829 struct mlxsw_sp_neigh_entry *neigh_entry)
1830{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001831 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001832 return;
1833
1834 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1835 return;
1836
1837 neigh_entry->counter_valid = true;
1838}
1839
1840static void
1841mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1842 struct mlxsw_sp_neigh_entry *neigh_entry)
1843{
1844 if (!neigh_entry->counter_valid)
1845 return;
1846 mlxsw_sp_flow_counter_free(mlxsw_sp,
1847 neigh_entry->counter_index);
1848 neigh_entry->counter_valid = false;
1849}
1850
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001851static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001852mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001853{
1854 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001855 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001856 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001857
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001858 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1859 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001860 return ERR_PTR(-EINVAL);
1861
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001862 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001863 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001864 return ERR_PTR(-ENOMEM);
1865
1866 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1867 if (err)
1868 goto err_neigh_entry_insert;
1869
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001870 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001871 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001872
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001873 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001874
1875err_neigh_entry_insert:
1876 mlxsw_sp_neigh_entry_free(neigh_entry);
1877 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001878}
1879
1880static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001881mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1882 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001883{
Ido Schimmel9665b742017-02-08 11:16:42 +01001884 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001885 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001886 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1887 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001888}
1889
1890static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001891mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001892{
Jiri Pirko33b13412016-11-10 12:31:04 +01001893 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001894
Jiri Pirko33b13412016-11-10 12:31:04 +01001895 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001896 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001897 &key, mlxsw_sp_neigh_ht_params);
1898}
1899
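/* The neighbour activity polling interval follows the shorter of the IPv4
 * and IPv6 DELAY_PROBE_TIME values of the default neighbour tables (IPv4
 * only when IPv6 is disabled).
 */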
Yotam Gigic723c7352016-07-05 11:27:43 +02001900static void
1901mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1902{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001903 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001904
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001905#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001906 interval = min_t(unsigned long,
1907 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1908 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001909#else
1910 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1911#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001912 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001913}
1914
1915static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1916 char *rauhtd_pl,
1917 int ent_index)
1918{
1919 struct net_device *dev;
1920 struct neighbour *n;
1921 __be32 dipn;
1922 u32 dip;
1923 u16 rif;
1924
1925 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1926
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001927 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001928 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1929 return;
1930 }
1931
1932 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001933 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001934 n = neigh_lookup(&arp_tbl, &dipn, dev);
1935 if (!n) {
1936 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1937 &dip);
1938 return;
1939 }
1940
1941 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1942 neigh_event_send(n, NULL);
1943 neigh_release(n);
1944}
1945
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001946#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001947static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1948 char *rauhtd_pl,
1949 int rec_index)
1950{
1951 struct net_device *dev;
1952 struct neighbour *n;
1953 struct in6_addr dip;
1954 u16 rif;
1955
1956 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1957 (char *) &dip);
1958
1959 if (!mlxsw_sp->router->rifs[rif]) {
1960 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1961 return;
1962 }
1963
1964 dev = mlxsw_sp->router->rifs[rif]->dev;
1965 n = neigh_lookup(&nd_tbl, &dip, dev);
1966 if (!n) {
1967 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1968 &dip);
1969 return;
1970 }
1971
1972 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1973 neigh_event_send(n, NULL);
1974 neigh_release(n);
1975}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001976#else
1977static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1978 char *rauhtd_pl,
1979 int rec_index)
1980{
1981}
1982#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001983
Yotam Gigic723c7352016-07-05 11:27:43 +02001984static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1985 char *rauhtd_pl,
1986 int rec_index)
1987{
1988 u8 num_entries;
1989 int i;
1990
1991 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1992 rec_index);
1993 /* Hardware starts counting at 0, so add 1. */
1994 num_entries++;
1995
1996 /* Each record consists of several neighbour entries. */
1997 for (i = 0; i < num_entries; i++) {
1998 int ent_index;
1999
2000 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2001 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2002 ent_index);
2003 }
2004
2005}
2006
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002007static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2008 char *rauhtd_pl,
2009 int rec_index)
2010{
2011 /* One record contains one entry. */
2012 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2013 rec_index);
2014}
2015
Yotam Gigic723c7352016-07-05 11:27:43 +02002016static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2017 char *rauhtd_pl, int rec_index)
2018{
2019 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2020 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2021 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2022 rec_index);
2023 break;
2024 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002025 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2026 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02002027 break;
2028 }
2029}
2030
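/* The dump needs another iteration when the reply carries the maximum number
 * of records and the last record is either an IPv6 one or a completely filled
 * IPv4 one.
 */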
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002031static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2032{
2033 u8 num_rec, last_rec_index, num_entries;
2034
2035 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2036 last_rec_index = num_rec - 1;
2037
2038 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2039 return false;
2040 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2041 MLXSW_REG_RAUHTD_TYPE_IPV6)
2042 return true;
2043
2044 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2045 last_rec_index);
2046 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2047 return true;
2048 return false;
2049}
2050
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002051static int
2052__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2053 char *rauhtd_pl,
2054 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02002055{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002056 int i, num_rec;
2057 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02002058
2059 /* Make sure the neighbour's netdev isn't removed in the
2060 * process.
2061 */
2062 rtnl_lock();
2063 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002064 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02002065 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2066 rauhtd_pl);
2067 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02002068 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02002069 break;
2070 }
2071 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2072 for (i = 0; i < num_rec; i++)
2073 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2074 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01002075 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02002076 rtnl_unlock();
2077
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02002078 return err;
2079}
2080
2081static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2082{
2083 enum mlxsw_reg_rauhtd_type type;
2084 char *rauhtd_pl;
2085 int err;
2086
2087 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2088 if (!rauhtd_pl)
2089 return -ENOMEM;
2090
2091 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2092 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2093 if (err)
2094 goto out;
2095
2096 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2097 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2098out:
Yotam Gigic723c7352016-07-05 11:27:43 +02002099 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02002100 return err;
2101}
2102
2103static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2104{
2105 struct mlxsw_sp_neigh_entry *neigh_entry;
2106
2107	/* Take the RTNL mutex here to prevent the lists from changing */
2108 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002109 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002110 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02002111		/* If this neigh has nexthops, make the kernel think this neigh
2112 * is active regardless of the traffic.
2113 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002114 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02002115 rtnl_unlock();
2116}
2117
2118static void
2119mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2120{
Ido Schimmel9011b672017-05-16 19:38:25 +02002121 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02002122
Ido Schimmel9011b672017-05-16 19:38:25 +02002123 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02002124 msecs_to_jiffies(interval));
2125}
2126
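/* Periodic work that dumps neighbour activity from the device and nudges the
 * kernel to keep neighbours that are used by nexthops alive.
 */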
2127static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2128{
Ido Schimmel9011b672017-05-16 19:38:25 +02002129 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02002130 int err;
2131
Ido Schimmel9011b672017-05-16 19:38:25 +02002132 router = container_of(work, struct mlxsw_sp_router,
2133 neighs_update.dw.work);
2134 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002135 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02002136 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02002137
Ido Schimmel9011b672017-05-16 19:38:25 +02002138 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02002139
Ido Schimmel9011b672017-05-16 19:38:25 +02002140 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02002141}
2142
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002143static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2144{
2145 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02002146 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002147
Ido Schimmel9011b672017-05-16 19:38:25 +02002148 router = container_of(work, struct mlxsw_sp_router,
2149 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002150	/* Iterate over nexthop neighbours, find those which are unresolved and
2151	 * send ARP on them. This solves the chicken-and-egg problem where a
2152	 * nexthop would not get offloaded until its neighbour is resolved, but
2153	 * the neighbour would never get resolved as long as traffic keeps
2154	 * flowing in HW via a different nexthop.
2155	 *
2156	 * Take the RTNL mutex here to prevent the lists from changing.
2157 */
2158 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02002159 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01002160 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01002161 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01002162 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002163 rtnl_unlock();
2164
Ido Schimmel9011b672017-05-16 19:38:25 +02002165 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002166 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2167}
2168
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002169static void
2170mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2171 struct mlxsw_sp_neigh_entry *neigh_entry,
2172 bool removing);
2173
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002174static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002175{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002176 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2177 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2178}
2179
2180static void
2181mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2182 struct mlxsw_sp_neigh_entry *neigh_entry,
2183 enum mlxsw_reg_rauht_op op)
2184{
Jiri Pirko33b13412016-11-10 12:31:04 +01002185 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002186 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002187 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002188
2189 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2190 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002191 if (neigh_entry->counter_valid)
2192 mlxsw_reg_rauht_pack_counter(rauht_pl,
2193 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002194 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2195}
2196
2197static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002198mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2199 struct mlxsw_sp_neigh_entry *neigh_entry,
2200 enum mlxsw_reg_rauht_op op)
2201{
2202 struct neighbour *n = neigh_entry->key.n;
2203 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2204 const char *dip = n->primary_key;
2205
2206 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2207 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002208 if (neigh_entry->counter_valid)
2209 mlxsw_reg_rauht_pack_counter(rauht_pl,
2210 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002211 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2212}
2213
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002214bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002215{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002216 struct neighbour *n = neigh_entry->key.n;
2217
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002218 /* Packets with a link-local destination address are trapped
2219 * after LPM lookup and never reach the neighbour table, so
2220 * there is no need to program such neighbours to the device.
2221 */
2222 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2223 IPV6_ADDR_LINKLOCAL)
2224 return true;
2225 return false;
2226}
2227
2228static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002229mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2230 struct mlxsw_sp_neigh_entry *neigh_entry,
2231 bool adding)
2232{
2233 if (!adding && !neigh_entry->connected)
2234 return;
2235 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002236 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002237 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2238 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002239 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002240 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002241 return;
2242 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2243 mlxsw_sp_rauht_op(adding));
2244 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002245 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002246 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002247}
2248
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002249void
2250mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2251 struct mlxsw_sp_neigh_entry *neigh_entry,
2252 bool adding)
2253{
2254 if (adding)
2255 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2256 else
2257 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2258 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2259}
2260
Ido Schimmelceb88812017-11-02 17:14:07 +01002261struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002262 struct work_struct work;
2263 struct mlxsw_sp *mlxsw_sp;
2264 struct neighbour *n;
2265};
2266
2267static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2268{
Ido Schimmelceb88812017-11-02 17:14:07 +01002269 struct mlxsw_sp_netevent_work *net_work =
2270 container_of(work, struct mlxsw_sp_netevent_work, work);
2271 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002272 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002273 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002274 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002275 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002276 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002277
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002278 /* If these parameters are changed after we release the lock,
2279 * then we are guaranteed to receive another event letting us
2280 * know about it.
2281 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002282 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002283 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002284 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002285 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002286 read_unlock_bh(&n->lock);
2287
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002288 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002289 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002290 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2291 if (!entry_connected && !neigh_entry)
2292 goto out;
2293 if (!neigh_entry) {
2294 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2295 if (IS_ERR(neigh_entry))
2296 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002297 }
2298
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002299 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2300 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2301 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2302
2303 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2304 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2305
2306out:
2307 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002308 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002309 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002310}
2311
Ido Schimmel28678f02017-11-02 17:14:10 +01002312static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2313
2314static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2315{
2316 struct mlxsw_sp_netevent_work *net_work =
2317 container_of(work, struct mlxsw_sp_netevent_work, work);
2318 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2319
2320 mlxsw_sp_mp_hash_init(mlxsw_sp);
2321 kfree(net_work);
2322}
2323
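/* Netevent notifier. It runs in atomic context, so neighbour updates and
 * multipath hash policy changes are deferred to work items.
 */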
2324static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002325 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002326{
Ido Schimmelceb88812017-11-02 17:14:07 +01002327 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002328 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002329 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002330 struct mlxsw_sp *mlxsw_sp;
2331 unsigned long interval;
2332 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002333 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002334 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002335
2336 switch (event) {
2337 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2338 p = ptr;
2339
2340 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002341 if (!p->dev || (p->tbl->family != AF_INET &&
2342 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002343 return NOTIFY_DONE;
2344
2345 /* We are in atomic context and can't take RTNL mutex,
2346 * so use RCU variant to walk the device chain.
2347 */
2348 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2349 if (!mlxsw_sp_port)
2350 return NOTIFY_DONE;
2351
2352 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2353 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002354 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002355
2356 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2357 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002358 case NETEVENT_NEIGH_UPDATE:
2359 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002360
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002361 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002362 return NOTIFY_DONE;
2363
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002364 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002365 if (!mlxsw_sp_port)
2366 return NOTIFY_DONE;
2367
Ido Schimmelceb88812017-11-02 17:14:07 +01002368 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2369 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002370 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002371 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002372 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002373
Ido Schimmelceb88812017-11-02 17:14:07 +01002374 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2375 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2376 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002377
2378 /* Take a reference to ensure the neighbour won't be
2379		 * destroyed until we drop the reference in the delayed
2380 * work.
2381 */
2382 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002383 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002384 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002385 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002386 case NETEVENT_MULTIPATH_HASH_UPDATE:
2387 net = ptr;
2388
2389 if (!net_eq(net, &init_net))
2390 return NOTIFY_DONE;
2391
2392 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2393 if (!net_work)
2394 return NOTIFY_BAD;
2395
2396 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2397 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2398 net_work->mlxsw_sp = router->mlxsw_sp;
2399 mlxsw_core_schedule_work(&net_work->work);
2400 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002401 }
2402
2403 return NOTIFY_DONE;
2404}
2405
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002406static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2407{
Yotam Gigic723c7352016-07-05 11:27:43 +02002408 int err;
2409
Ido Schimmel9011b672017-05-16 19:38:25 +02002410 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002411 &mlxsw_sp_neigh_ht_params);
2412 if (err)
2413 return err;
2414
2415 /* Initialize the polling interval according to the default
2416 * table.
2417 */
2418 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2419
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002420	/* Create the delayed works for activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002421 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002422 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002423 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002424 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002425 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2426 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002427 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002428}
2429
2430static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2431{
Ido Schimmel9011b672017-05-16 19:38:25 +02002432 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2433 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2434 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002435}
2436
Ido Schimmel63dd00f2017-11-12 09:02:56 +01002437static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
2438 const struct mlxsw_sp_rif *rif)
2439{
2440 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2441
2442 mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
2443 rif->rif_index, rif->addr);
2444 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2445}
2446
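/* Called when a RIF goes away: flush its neighbours from the device and
 * destroy the corresponding neighbour entries.
 */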
Ido Schimmel9665b742017-02-08 11:16:42 +01002447static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002448 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002449{
2450 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2451
Ido Schimmel63dd00f2017-11-12 09:02:56 +01002452 mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002453 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel63dd00f2017-11-12 09:02:56 +01002454 rif_list_node)
Ido Schimmel9665b742017-02-08 11:16:42 +01002455 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2456}
2457
Petr Machata35225e42017-09-02 23:49:22 +02002458enum mlxsw_sp_nexthop_type {
2459 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002460 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002461};
2462
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002463struct mlxsw_sp_nexthop_key {
2464 struct fib_nh *fib_nh;
2465};
2466
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002467struct mlxsw_sp_nexthop {
2468 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002469 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002470 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002471 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2472 * this belongs to
2473 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002474 struct rhash_head ht_node;
2475 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002476 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002477 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002478 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002479 int norm_nh_weight;
2480 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002481 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002482 u8 should_offload:1, /* set indicates this neigh is connected and
2483 * should be put to KVD linear area of this group.
2484 */
2485 offloaded:1, /* set in case the neigh is actually put into
2486 * KVD linear area of this group.
2487 */
2488 update:1; /* set indicates that MAC of this neigh should be
2489 * updated in HW
2490 */
Petr Machata35225e42017-09-02 23:49:22 +02002491 enum mlxsw_sp_nexthop_type type;
2492 union {
2493 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002494 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002495 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002496 unsigned int counter_index;
2497 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002498};
2499
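/* A group of nexthops shared by FIB entries. When the group has a valid
 * adjacency index, its members occupy consecutive entries in the KVD linear
 * (adjacency) area starting at that index.
 */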
2500struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002501 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002502 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002503 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002504 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002505 u8 adj_index_valid:1,
2506 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002507 u32 adj_index;
2508 u16 ecmp_size;
2509 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002510 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002511 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002512#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002513};
2514
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002515void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2516 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002517{
2518 struct devlink *devlink;
2519
2520 devlink = priv_to_devlink(mlxsw_sp->core);
2521 if (!devlink_dpipe_table_counter_enabled(devlink,
2522 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2523 return;
2524
2525 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2526 return;
2527
2528 nh->counter_valid = true;
2529}
2530
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002531void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2532 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002533{
2534 if (!nh->counter_valid)
2535 return;
2536 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2537 nh->counter_valid = false;
2538}
2539
2540int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2541 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2542{
2543 if (!nh->counter_valid)
2544 return -EINVAL;
2545
2546 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2547 p_counter, NULL);
2548}
2549
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002550struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2551 struct mlxsw_sp_nexthop *nh)
2552{
2553 if (!nh) {
2554 if (list_empty(&router->nexthop_list))
2555 return NULL;
2556 else
2557 return list_first_entry(&router->nexthop_list,
2558 typeof(*nh), router_list_node);
2559 }
2560 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2561 return NULL;
2562 return list_next_entry(nh, router_list_node);
2563}
2564
2565bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2566{
2567 return nh->offloaded;
2568}
2569
2570unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2571{
2572 if (!nh->offloaded)
2573 return NULL;
2574 return nh->neigh_entry->ha;
2575}
2576
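/* Report where a nexthop lives in the adjacency area: the group's base
 * adjacency index and size, and the offset of this nexthop within the group.
 */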
2577int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002578 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002579{
2580 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2581 u32 adj_hash_index = 0;
2582 int i;
2583
2584 if (!nh->offloaded || !nh_grp->adj_index_valid)
2585 return -EINVAL;
2586
2587 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002588 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002589
2590 for (i = 0; i < nh_grp->count; i++) {
2591 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2592
2593 if (nh_iter == nh)
2594 break;
2595 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002596 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002597 }
2598
2599 *p_adj_hash_index = adj_hash_index;
2600 return 0;
2601}
2602
2603struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2604{
2605 return nh->rif;
2606}
2607
2608bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2609{
2610 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2611 int i;
2612
2613 for (i = 0; i < nh_grp->count; i++) {
2614 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2615
2616 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2617 return true;
2618 }
2619 return false;
2620}
2621
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002622static struct fib_info *
2623mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2624{
2625 return nh_grp->priv;
2626}
2627
2628struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002629 enum mlxsw_sp_l3proto proto;
2630 union {
2631 struct fib_info *fi;
2632 struct mlxsw_sp_fib6_entry *fib6_entry;
2633 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002634};
2635
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002636static bool
2637mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2638 const struct in6_addr *gw, int ifindex)
2639{
2640 int i;
2641
2642 for (i = 0; i < nh_grp->count; i++) {
2643 const struct mlxsw_sp_nexthop *nh;
2644
2645 nh = &nh_grp->nexthops[i];
2646 if (nh->ifindex == ifindex &&
2647 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2648 return true;
2649 }
2650
2651 return false;
2652}
2653
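/* An IPv6 FIB entry matches a nexthop group if the nexthop count equals the
 * number of routes and each route's gateway and ifindex is found in the
 * group.
 */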
2654static bool
2655mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2656 const struct mlxsw_sp_fib6_entry *fib6_entry)
2657{
2658 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2659
2660 if (nh_grp->count != fib6_entry->nrt6)
2661 return false;
2662
2663 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2664 struct in6_addr *gw;
2665 int ifindex;
2666
2667 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2668 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2669 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2670 return false;
2671 }
2672
2673 return true;
2674}
2675
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002676static int
2677mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2678{
2679 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2680 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2681
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002682 switch (cmp_arg->proto) {
2683 case MLXSW_SP_L3_PROTO_IPV4:
2684 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2685 case MLXSW_SP_L3_PROTO_IPV6:
2686 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2687 cmp_arg->fib6_entry);
2688 default:
2689 WARN_ON(1);
2690 return 1;
2691 }
2692}
2693
2694static int
2695mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2696{
2697 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002698}
2699
2700static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2701{
2702 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002703 const struct mlxsw_sp_nexthop *nh;
2704 struct fib_info *fi;
2705 unsigned int val;
2706 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002707
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002708 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2709 case AF_INET:
2710 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2711 return jhash(&fi, sizeof(fi), seed);
2712 case AF_INET6:
2713 val = nh_grp->count;
2714 for (i = 0; i < nh_grp->count; i++) {
2715 nh = &nh_grp->nexthops[i];
2716 val ^= nh->ifindex;
2717 }
2718 return jhash(&val, sizeof(val), seed);
2719 default:
2720 WARN_ON(1);
2721 return 0;
2722 }
2723}
2724
2725static u32
2726mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2727{
2728 unsigned int val = fib6_entry->nrt6;
2729 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2730 struct net_device *dev;
2731
2732 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2733 dev = mlxsw_sp_rt6->rt->dst.dev;
2734 val ^= dev->ifindex;
2735 }
2736
2737 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002738}
2739
2740static u32
2741mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2742{
2743 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2744
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002745 switch (cmp_arg->proto) {
2746 case MLXSW_SP_L3_PROTO_IPV4:
2747 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2748 case MLXSW_SP_L3_PROTO_IPV6:
2749 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2750 default:
2751 WARN_ON(1);
2752 return 0;
2753 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002754}
2755
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002756static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002757 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002758 .hashfn = mlxsw_sp_nexthop_group_hash,
2759 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2760 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002761};
2762
2763static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2764 struct mlxsw_sp_nexthop_group *nh_grp)
2765{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002766 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2767 !nh_grp->gateway)
2768 return 0;
2769
Ido Schimmel9011b672017-05-16 19:38:25 +02002770 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002771 &nh_grp->ht_node,
2772 mlxsw_sp_nexthop_group_ht_params);
2773}
2774
2775static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2776 struct mlxsw_sp_nexthop_group *nh_grp)
2777{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002778 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2779 !nh_grp->gateway)
2780 return;
2781
Ido Schimmel9011b672017-05-16 19:38:25 +02002782 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002783 &nh_grp->ht_node,
2784 mlxsw_sp_nexthop_group_ht_params);
2785}
2786
2787static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002788mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2789 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002790{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002791 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2792
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002793 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002794 cmp_arg.fi = fi;
2795 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2796 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002797 mlxsw_sp_nexthop_group_ht_params);
2798}
2799
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002800static struct mlxsw_sp_nexthop_group *
2801mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2802 struct mlxsw_sp_fib6_entry *fib6_entry)
2803{
2804 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2805
2806 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2807 cmp_arg.fib6_entry = fib6_entry;
2808 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2809 &cmp_arg,
2810 mlxsw_sp_nexthop_group_ht_params);
2811}
2812
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002813static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2814 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2815 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2816 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2817};
2818
2819static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2820 struct mlxsw_sp_nexthop *nh)
2821{
Ido Schimmel9011b672017-05-16 19:38:25 +02002822 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002823 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2824}
2825
2826static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2827 struct mlxsw_sp_nexthop *nh)
2828{
Ido Schimmel9011b672017-05-16 19:38:25 +02002829 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002830 mlxsw_sp_nexthop_ht_params);
2831}
2832
Ido Schimmelad178c82017-02-08 11:16:40 +01002833static struct mlxsw_sp_nexthop *
2834mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2835 struct mlxsw_sp_nexthop_key key)
2836{
Ido Schimmel9011b672017-05-16 19:38:25 +02002837 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002838 mlxsw_sp_nexthop_ht_params);
2839}
2840
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002841static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002842 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002843 u32 adj_index, u16 ecmp_size,
2844 u32 new_adj_index,
2845 u16 new_ecmp_size)
2846{
2847 char raleu_pl[MLXSW_REG_RALEU_LEN];
2848
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002849 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002850 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2851 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002852 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002853 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2854}
2855
2856static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2857 struct mlxsw_sp_nexthop_group *nh_grp,
2858 u32 old_adj_index, u16 old_ecmp_size)
2859{
2860 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002861 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002862 int err;
2863
2864 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002865 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002866 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002867 fib = fib_entry->fib_node->fib;
2868 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002869 old_adj_index,
2870 old_ecmp_size,
2871 nh_grp->adj_index,
2872 nh_grp->ecmp_size);
2873 if (err)
2874 return err;
2875 }
2876 return 0;
2877}
2878
Ido Schimmeleb789982017-10-22 23:11:48 +02002879static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2880 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002881{
2882 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2883 char ratr_pl[MLXSW_REG_RATR_LEN];
2884
2885 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002886 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2887 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002888 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002889 if (nh->counter_valid)
2890 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2891 else
2892 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2893
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002894 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2895}
2896
Ido Schimmeleb789982017-10-22 23:11:48 +02002897int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2898 struct mlxsw_sp_nexthop *nh)
2899{
2900 int i;
2901
2902 for (i = 0; i < nh->num_adj_entries; i++) {
2903 int err;
2904
2905 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2906 if (err)
2907 return err;
2908 }
2909
2910 return 0;
2911}
2912
2913static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2914 u32 adj_index,
2915 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002916{
2917 const struct mlxsw_sp_ipip_ops *ipip_ops;
2918
2919 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2920 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2921}
2922
Ido Schimmeleb789982017-10-22 23:11:48 +02002923static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2924 u32 adj_index,
2925 struct mlxsw_sp_nexthop *nh)
2926{
2927 int i;
2928
2929 for (i = 0; i < nh->num_adj_entries; i++) {
2930 int err;
2931
2932 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2933 nh);
2934 if (err)
2935 return err;
2936 }
2937
2938 return 0;
2939}
2940
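/* Write the adjacency entries of every nexthop in the group that should be
 * offloaded. Unchanged entries are skipped unless 'reallocate' is set because
 * the group was moved to a new KVD linear block. Ethernet nexthops are
 * programmed via RATR, IP-in-IP nexthops via the tunnel's nexthop_update op.
 */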
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002941static int
Petr Machata35225e42017-09-02 23:49:22 +02002942mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2943 struct mlxsw_sp_nexthop_group *nh_grp,
2944 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002945{
2946 u32 adj_index = nh_grp->adj_index; /* base */
2947 struct mlxsw_sp_nexthop *nh;
2948 int i;
2949 int err;
2950
2951 for (i = 0; i < nh_grp->count; i++) {
2952 nh = &nh_grp->nexthops[i];
2953
2954 if (!nh->should_offload) {
2955 nh->offloaded = 0;
2956 continue;
2957 }
2958
Ido Schimmela59b7e02017-01-23 11:11:42 +01002959 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002960 switch (nh->type) {
2961 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002962 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002963 (mlxsw_sp, adj_index, nh);
2964 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002965 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2966 err = mlxsw_sp_nexthop_ipip_update
2967 (mlxsw_sp, adj_index, nh);
2968 break;
Petr Machata35225e42017-09-02 23:49:22 +02002969 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002970 if (err)
2971 return err;
2972 nh->update = 0;
2973 nh->offloaded = 1;
2974 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002975 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002976 }
2977 return 0;
2978}
2979
Ido Schimmel1819ae32017-07-21 18:04:28 +02002980static bool
2981mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2982 const struct mlxsw_sp_fib_entry *fib_entry);
2983
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002984static int
2985mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2986 struct mlxsw_sp_nexthop_group *nh_grp)
2987{
2988 struct mlxsw_sp_fib_entry *fib_entry;
2989 int err;
2990
2991 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002992 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2993 fib_entry))
2994 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002995 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2996 if (err)
2997 return err;
2998 }
2999 return 0;
3000}
3001
3002static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02003003mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3004 enum mlxsw_reg_ralue_op op, int err);
3005
3006static void
3007mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
3008{
3009 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
3010 struct mlxsw_sp_fib_entry *fib_entry;
3011
3012 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3013 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
3014 fib_entry))
3015 continue;
3016 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
3017 }
3018}
3019
Ido Schimmel425a08c2017-10-22 23:11:47 +02003020static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3021{
3022 /* Valid sizes for an adjacency group are:
3023 * 1-64, 512, 1024, 2048 and 4096.
3024 */
3025 if (*p_adj_grp_size <= 64)
3026 return;
3027 else if (*p_adj_grp_size <= 512)
3028 *p_adj_grp_size = 512;
3029 else if (*p_adj_grp_size <= 1024)
3030 *p_adj_grp_size = 1024;
3031 else if (*p_adj_grp_size <= 2048)
3032 *p_adj_grp_size = 2048;
3033 else
3034 *p_adj_grp_size = 4096;
3035}
3036
3037static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3038 unsigned int alloc_size)
3039{
3040 if (alloc_size >= 4096)
3041 *p_adj_grp_size = 4096;
3042 else if (alloc_size >= 2048)
3043 *p_adj_grp_size = 2048;
3044 else if (alloc_size >= 1024)
3045 *p_adj_grp_size = 1024;
3046 else if (alloc_size >= 512)
3047 *p_adj_grp_size = 512;
3048}
3049
3050static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3051 u16 *p_adj_grp_size)
3052{
3053 unsigned int alloc_size;
3054 int err;
3055
3056 /* Round up the requested group size to the next size supported
3057 * by the device and make sure the request can be satisfied.
3058 */
3059 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3060 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
3061 &alloc_size);
3062 if (err)
3063 return err;
3064 /* It is possible the allocation results in more allocated
3065	 * entries than requested. Try to use as many of them as
3066 * possible.
3067 */
3068 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
3069
3070 return 0;
3071}
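/* Example of the sizing above: a request for 97 adjacency entries is first
 * rounded up to 512; if the KVD linear allocator reports that such a request
 * would actually consume a 1024-entry block, the group size is then bumped to
 * 1024 so that the whole block is used.
 */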
3072
Ido Schimmel77d964e2017-08-02 09:56:05 +02003073static void
Ido Schimmeleb789982017-10-22 23:11:48 +02003074mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3075{
3076 int i, g = 0, sum_norm_weight = 0;
3077 struct mlxsw_sp_nexthop *nh;
3078
3079 for (i = 0; i < nh_grp->count; i++) {
3080 nh = &nh_grp->nexthops[i];
3081
3082 if (!nh->should_offload)
3083 continue;
3084 if (g > 0)
3085 g = gcd(nh->nh_weight, g);
3086 else
3087 g = nh->nh_weight;
3088 }
3089
3090 for (i = 0; i < nh_grp->count; i++) {
3091 nh = &nh_grp->nexthops[i];
3092
3093 if (!nh->should_offload)
3094 continue;
3095 nh->norm_nh_weight = nh->nh_weight / g;
3096 sum_norm_weight += nh->norm_nh_weight;
3097 }
3098
3099 nh_grp->sum_norm_weight = sum_norm_weight;
3100}
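/* Example of the normalization above: nexthop weights of 2, 4 and 6 have a
 * GCD of 2 and normalize to 1, 2 and 3, for a sum_norm_weight of 6. Nexthops
 * that should not be offloaded are skipped in both passes.
 */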
3101
3102static void
3103mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3104{
3105 int total = nh_grp->sum_norm_weight;
3106 u16 ecmp_size = nh_grp->ecmp_size;
3107 int i, weight = 0, lower_bound = 0;
3108
3109 for (i = 0; i < nh_grp->count; i++) {
3110 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3111 int upper_bound;
3112
3113 if (!nh->should_offload)
3114 continue;
3115 weight += nh->norm_nh_weight;
3116 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3117 nh->num_adj_entries = upper_bound - lower_bound;
3118 lower_bound = upper_bound;
3119 }
3120}
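/* Example of the rebalancing above: with normalized weights 1 and 2 and an
 * ECMP size of 3, the first nexthop gets DIV_ROUND_CLOSEST(3 * 1, 3) - 0 = 1
 * adjacency entry and the second gets DIV_ROUND_CLOSEST(3 * 3, 3) - 1 = 2,
 * preserving the 1:2 weight ratio within the group.
 */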
3121
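/* Refresh a nexthop group after its composition changed: weights are
 * normalized, a valid ECMP size is chosen, a new KVD linear block is
 * allocated and programmed, FIB entries are re-pointed (or mass-updated from
 * the old block) and the old block is freed. Any failure falls back to
 * trapping the group's traffic to the kernel.
 */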
3122static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003123mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3124 struct mlxsw_sp_nexthop_group *nh_grp)
3125{
Ido Schimmeleb789982017-10-22 23:11:48 +02003126 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003127 struct mlxsw_sp_nexthop *nh;
3128 bool offload_change = false;
3129 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003130 bool old_adj_index_valid;
3131 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003132 int i;
3133 int err;
3134
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01003135 if (!nh_grp->gateway) {
3136 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3137 return;
3138 }
3139
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003140 for (i = 0; i < nh_grp->count; i++) {
3141 nh = &nh_grp->nexthops[i];
3142
Petr Machata56b8a9e2017-07-31 09:27:29 +02003143 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003144 offload_change = true;
3145 if (nh->should_offload)
3146 nh->update = 1;
3147 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003148 }
3149 if (!offload_change) {
3150 /* Nothing was added or removed, so no need to reallocate. Just
3151 * update MAC on existing adjacency indexes.
3152 */
Petr Machata35225e42017-09-02 23:49:22 +02003153 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003154 if (err) {
3155 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3156 goto set_trap;
3157 }
3158 return;
3159 }
Ido Schimmeleb789982017-10-22 23:11:48 +02003160 mlxsw_sp_nexthop_group_normalize(nh_grp);
3161 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003162 /* No neigh of this group is connected so we just set
3163	 * the trap and let everything flow through kernel.
3164 */
3165 goto set_trap;
3166
Ido Schimmeleb789982017-10-22 23:11:48 +02003167 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02003168 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3169 if (err)
3170 /* No valid allocation size available. */
3171 goto set_trap;
3172
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01003173 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
3174 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003175 /* We ran out of KVD linear space, just set the
3176 * trap and let everything flow through kernel.
3177 */
3178 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3179 goto set_trap;
3180 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003181 old_adj_index_valid = nh_grp->adj_index_valid;
3182 old_adj_index = nh_grp->adj_index;
3183 old_ecmp_size = nh_grp->ecmp_size;
3184 nh_grp->adj_index_valid = 1;
3185 nh_grp->adj_index = adj_index;
3186 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003187 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003188 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003189 if (err) {
3190 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3191 goto set_trap;
3192 }
3193
3194 if (!old_adj_index_valid) {
3195 /* The trap was set for fib entries, so we have to call
3196 * fib entry update to unset it and use adjacency index.
3197 */
3198 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3199 if (err) {
3200 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3201 goto set_trap;
3202 }
3203 return;
3204 }
3205
3206 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3207 old_adj_index, old_ecmp_size);
3208 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3209 if (err) {
3210 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3211 goto set_trap;
3212 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003213
3214 /* Offload state within the group changed, so update the flags. */
3215 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3216
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003217 return;
3218
3219set_trap:
3220 old_adj_index_valid = nh_grp->adj_index_valid;
3221 nh_grp->adj_index_valid = 0;
3222 for (i = 0; i < nh_grp->count; i++) {
3223 nh = &nh_grp->nexthops[i];
3224 nh->offloaded = 0;
3225 }
3226 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3227 if (err)
3228 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3229 if (old_adj_index_valid)
3230 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3231}
3232
3233static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3234 bool removing)
3235{
Petr Machata213666a2017-07-31 09:27:30 +02003236 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003237 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02003238 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003239 nh->should_offload = 0;
3240 nh->update = 1;
3241}
3242
3243static void
3244mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3245 struct mlxsw_sp_neigh_entry *neigh_entry,
3246 bool removing)
3247{
3248 struct mlxsw_sp_nexthop *nh;
3249
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003250 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3251 neigh_list_node) {
3252 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3253 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3254 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003255}
3256
Ido Schimmel9665b742017-02-08 11:16:42 +01003257static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003258 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003259{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003260 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003261 return;
3262
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003263 nh->rif = rif;
3264 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003265}
3266
3267static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3268{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003269 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003270 return;
3271
3272 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003273 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003274}
3275
Ido Schimmela8c97012017-02-08 11:16:35 +01003276static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3277 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003278{
3279 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003280 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003281 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003282 int err;
3283
Ido Schimmelad178c82017-02-08 11:16:40 +01003284 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003285 return 0;
3286
Jiri Pirko33b13412016-11-10 12:31:04 +01003287	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003288	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003289 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003290 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003291 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003292 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003293 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003294 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3295 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003296 if (IS_ERR(n))
3297 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003298 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003299 }
3300 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3301 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003302 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3303 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003304 err = -EINVAL;
3305 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003306 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003307 }
Yotam Gigib2157142016-07-05 11:27:51 +02003308
3309 /* If that is the first nexthop connected to that neigh, add to
3310 * nexthop_neighs_list
3311 */
3312 if (list_empty(&neigh_entry->nexthop_list))
3313 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003314 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003315
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003316 nh->neigh_entry = neigh_entry;
3317 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3318 read_lock_bh(&n->lock);
3319 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003320 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003321 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003322 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003323
3324 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003325
3326err_neigh_entry_create:
3327 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003328 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003329}
3330
Ido Schimmela8c97012017-02-08 11:16:35 +01003331static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3332 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003333{
3334 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003335 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003336
Ido Schimmelb8399a12017-02-08 11:16:33 +01003337 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003338 return;
3339 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003340
Ido Schimmel58312122016-12-23 09:32:50 +01003341 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003342 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003343 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003344
3345 /* If that is the last nexthop connected to that neigh, remove from
3346 * nexthop_neighs_list
3347 */
Ido Schimmele58be792017-02-08 11:16:28 +01003348 if (list_empty(&neigh_entry->nexthop_list))
3349 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003350
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003351 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3352 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3353
3354 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003355}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003356
Petr Machata44b0fff2017-11-03 10:03:44 +01003357static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3358{
3359 struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3360
3361 return ul_dev ? (ul_dev->flags & IFF_UP) : true;
3362}
3363
Petr Machata1012b9a2017-09-02 23:49:23 +02003364static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003365 struct mlxsw_sp_nexthop *nh,
3366 struct net_device *ol_dev)
3367{
Petr Machata44b0fff2017-11-03 10:03:44 +01003368 bool removing;
3369
Petr Machata1012b9a2017-09-02 23:49:23 +02003370 if (!nh->nh_grp->gateway || nh->ipip_entry)
3371 return 0;
3372
Petr Machata4cccb732017-10-16 16:26:39 +02003373 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3374 if (!nh->ipip_entry)
3375 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003376
Petr Machata44b0fff2017-11-03 10:03:44 +01003377 removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
3378 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata1012b9a2017-09-02 23:49:23 +02003379 return 0;
3380}
3381
3382static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3383 struct mlxsw_sp_nexthop *nh)
3384{
3385 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3386
3387 if (!ipip_entry)
3388 return;
3389
3390 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003391 nh->ipip_entry = NULL;
3392}
3393
3394static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3395 const struct fib_nh *fib_nh,
3396 enum mlxsw_sp_ipip_type *p_ipipt)
3397{
3398 struct net_device *dev = fib_nh->nh_dev;
3399
3400 return dev &&
3401 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3402 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3403}
3404
Petr Machata35225e42017-09-02 23:49:22 +02003405static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3406 struct mlxsw_sp_nexthop *nh)
3407{
3408 switch (nh->type) {
3409 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3410 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3411 mlxsw_sp_nexthop_rif_fini(nh);
3412 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003413 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003414 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003415 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3416 break;
Petr Machata35225e42017-09-02 23:49:22 +02003417 }
3418}
3419
3420static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3421 struct mlxsw_sp_nexthop *nh,
3422 struct fib_nh *fib_nh)
3423{
Petr Machata1012b9a2017-09-02 23:49:23 +02003424 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003425 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003426 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003427 struct mlxsw_sp_rif *rif;
3428 int err;
3429
Petr Machata1012b9a2017-09-02 23:49:23 +02003430 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3431 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3432 MLXSW_SP_L3_PROTO_IPV4)) {
3433 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003434 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003435 if (err)
3436 return err;
3437 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3438 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003439 }
3440
Petr Machata35225e42017-09-02 23:49:22 +02003441 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3442 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3443 if (!rif)
3444 return 0;
3445
3446 mlxsw_sp_nexthop_rif_init(nh, rif);
3447 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3448 if (err)
3449 goto err_neigh_init;
3450
3451 return 0;
3452
3453err_neigh_init:
3454 mlxsw_sp_nexthop_rif_fini(nh);
3455 return err;
3456}
3457
3458static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3459 struct mlxsw_sp_nexthop *nh)
3460{
3461 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3462}
3463
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003464static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3465 struct mlxsw_sp_nexthop_group *nh_grp,
3466 struct mlxsw_sp_nexthop *nh,
3467 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003468{
3469 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003470 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003471 int err;
3472
3473 nh->nh_grp = nh_grp;
3474 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003475#ifdef CONFIG_IP_ROUTE_MULTIPATH
3476 nh->nh_weight = fib_nh->nh_weight;
3477#else
3478 nh->nh_weight = 1;
3479#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003480 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003481 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3482 if (err)
3483 return err;
3484
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003485 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003486 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3487
Ido Schimmel97989ee2017-03-10 08:53:38 +01003488 if (!dev)
3489 return 0;
3490
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003491 in_dev = __in_dev_get_rtnl(dev);
3492 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3493 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3494 return 0;
3495
Petr Machata35225e42017-09-02 23:49:22 +02003496 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003497 if (err)
3498 goto err_nexthop_neigh_init;
3499
3500 return 0;
3501
3502err_nexthop_neigh_init:
3503 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3504 return err;
3505}
3506
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003507static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3508 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003509{
Petr Machata35225e42017-09-02 23:49:22 +02003510 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003511 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003512 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003513 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003514}
3515
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003516static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3517 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003518{
3519 struct mlxsw_sp_nexthop_key key;
3520 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003521
Ido Schimmel9011b672017-05-16 19:38:25 +02003522 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003523 return;
3524
3525 key.fib_nh = fib_nh;
3526 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3527 if (WARN_ON_ONCE(!nh))
3528 return;
3529
Ido Schimmelad178c82017-02-08 11:16:40 +01003530 switch (event) {
3531 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003532 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003533 break;
3534 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003535 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003536 break;
3537 }
3538
3539 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3540}
3541
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003542static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3543 struct mlxsw_sp_rif *rif)
3544{
3545 struct mlxsw_sp_nexthop *nh;
Petr Machata44b0fff2017-11-03 10:03:44 +01003546 bool removing;
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003547
3548 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
Petr Machata44b0fff2017-11-03 10:03:44 +01003549 switch (nh->type) {
3550 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3551 removing = false;
3552 break;
3553 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3554 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
3555 break;
3556 default:
3557 WARN_ON(1);
3558 continue;
3559 }
3560
3561 __mlxsw_sp_nexthop_neigh_update(nh, removing);
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003562 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3563 }
3564}
3565
Ido Schimmel9665b742017-02-08 11:16:42 +01003566static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003567 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003568{
3569 struct mlxsw_sp_nexthop *nh, *tmp;
3570
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003571 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003572 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003573 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3574 }
3575}
3576
Petr Machata9b014512017-09-02 23:49:20 +02003577static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3578 const struct fib_info *fi)
3579{
Petr Machata1012b9a2017-09-02 23:49:23 +02003580 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3581 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003582}
3583
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003584static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003585mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003586{
3587 struct mlxsw_sp_nexthop_group *nh_grp;
3588 struct mlxsw_sp_nexthop *nh;
3589 struct fib_nh *fib_nh;
3590 size_t alloc_size;
3591 int i;
3592 int err;
3593
3594 alloc_size = sizeof(*nh_grp) +
3595 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3596 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3597 if (!nh_grp)
3598 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003599 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003600 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003601 nh_grp->neigh_tbl = &arp_tbl;
3602
Petr Machata9b014512017-09-02 23:49:20 +02003603 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003604 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003605 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003606 for (i = 0; i < nh_grp->count; i++) {
3607 nh = &nh_grp->nexthops[i];
3608 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003609 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003610 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003611 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003612 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003613 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3614 if (err)
3615 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003616 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3617 return nh_grp;
3618
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003619err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003620err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003621 for (i--; i >= 0; i--) {
3622 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003623 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003624 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003625 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003626 kfree(nh_grp);
3627 return ERR_PTR(err);
3628}
3629
3630static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003631mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3632 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003633{
3634 struct mlxsw_sp_nexthop *nh;
3635 int i;
3636
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003637 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003638 for (i = 0; i < nh_grp->count; i++) {
3639 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003640 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003641 }
Ido Schimmel58312122016-12-23 09:32:50 +01003642 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3643 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003644 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003645 kfree(nh_grp);
3646}
3647
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003648static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3649 struct mlxsw_sp_fib_entry *fib_entry,
3650 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003651{
3652 struct mlxsw_sp_nexthop_group *nh_grp;
3653
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003654 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003655 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003656 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003657 if (IS_ERR(nh_grp))
3658 return PTR_ERR(nh_grp);
3659 }
3660 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3661 fib_entry->nh_group = nh_grp;
3662 return 0;
3663}
3664
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003665static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3666 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003667{
3668 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3669
3670 list_del(&fib_entry->nexthop_group_node);
3671 if (!list_empty(&nh_grp->fib_list))
3672 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003673 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003674}
3675
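/* Only IPv4 routes with a TOS of zero are considered for offload; routes with
 * a non-zero TOS keep a trap action and their traffic is handled by the
 * kernel.
 */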
Ido Schimmel013b20f2017-02-08 11:16:36 +01003676static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003677mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3678{
3679 struct mlxsw_sp_fib4_entry *fib4_entry;
3680
3681 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3682 common);
3683 return !fib4_entry->tos;
3684}
3685
3686static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003687mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3688{
3689 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3690
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003691 switch (fib_entry->fib_node->fib->proto) {
3692 case MLXSW_SP_L3_PROTO_IPV4:
3693 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3694 return false;
3695 break;
3696 case MLXSW_SP_L3_PROTO_IPV6:
3697 break;
3698 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003699
Ido Schimmel013b20f2017-02-08 11:16:36 +01003700 switch (fib_entry->type) {
3701 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3702 return !!nh_group->adj_index_valid;
3703 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003704 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003705 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3706 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003707 default:
3708 return false;
3709 }
3710}
3711
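/* Find the nexthop in the group whose RIF device and gateway address match
 * the given IPv6 route, or NULL if the route has no corresponding nexthop.
 */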
Ido Schimmel428b8512017-08-03 13:28:28 +02003712static struct mlxsw_sp_nexthop *
3713mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3714 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3715{
3716 int i;
3717
3718 for (i = 0; i < nh_grp->count; i++) {
3719 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3720 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3721
3722 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3723 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3724 &rt->rt6i_gateway))
3725 return nh;
3726 continue;
3727 }
3728
3729 return NULL;
3730}
3731
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003732static void
3733mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3734{
3735 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3736 int i;
3737
Petr Machata4607f6d2017-09-02 23:49:25 +02003738 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3739 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003740 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3741 return;
3742 }
3743
3744 for (i = 0; i < nh_grp->count; i++) {
3745 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3746
3747 if (nh->offloaded)
3748 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3749 else
3750 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3751 }
3752}
3753
3754static void
3755mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3756{
3757 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3758 int i;
3759
3760 for (i = 0; i < nh_grp->count; i++) {
3761 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3762
3763 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3764 }
3765}
3766
Ido Schimmel428b8512017-08-03 13:28:28 +02003767static void
3768mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3769{
3770 struct mlxsw_sp_fib6_entry *fib6_entry;
3771 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3772
3773 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3774 common);
3775
3776 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3777 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003778 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003779 return;
3780 }
3781
3782 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3783 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3784 struct mlxsw_sp_nexthop *nh;
3785
3786 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3787 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003788 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003789 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003790 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003791 }
3792}
3793
3794static void
3795mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3796{
3797 struct mlxsw_sp_fib6_entry *fib6_entry;
3798 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3799
3800 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3801 common);
3802 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3803 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3804
Ido Schimmelfe400792017-08-15 09:09:49 +02003805 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003806 }
3807}
3808
Ido Schimmel013b20f2017-02-08 11:16:36 +01003809static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3810{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003811 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003812 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003813 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003814 break;
3815 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003816 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3817 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003818 }
3819}
3820
3821static void
3822mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3823{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003824 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003825 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003826 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003827 break;
3828 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003829 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3830 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003831 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003832}
3833
3834static void
3835mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3836 enum mlxsw_reg_ralue_op op, int err)
3837{
3838 switch (op) {
3839 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003840 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3841 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3842 if (err)
3843 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003844 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003845 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003846 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003847 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3848 return;
3849 default:
3850 return;
3851 }
3852}
3853
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003854static void
3855mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3856 const struct mlxsw_sp_fib_entry *fib_entry,
3857 enum mlxsw_reg_ralue_op op)
3858{
3859 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3860 enum mlxsw_reg_ralxx_protocol proto;
3861 u32 *p_dip;
3862
3863 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3864
3865 switch (fib->proto) {
3866 case MLXSW_SP_L3_PROTO_IPV4:
3867 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3868 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3869 fib_entry->fib_node->key.prefix_len,
3870 *p_dip);
3871 break;
3872 case MLXSW_SP_L3_PROTO_IPV6:
3873 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3874 fib_entry->fib_node->key.prefix_len,
3875 fib_entry->fib_node->key.addr);
3876 break;
3877 }
3878}
3879
3880static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3881 struct mlxsw_sp_fib_entry *fib_entry,
3882 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003883{
3884 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003885 enum mlxsw_reg_ralue_trap_action trap_action;
3886 u16 trap_id = 0;
3887 u32 adjacency_index = 0;
3888 u16 ecmp_size = 0;
3889
3890 /* In case the nexthop group adjacency index is valid, use it
3891	 * with the provided ECMP size. Otherwise, set up a trap and pass
3892	 * traffic to the kernel.
3893 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003894 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003895 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3896 adjacency_index = fib_entry->nh_group->adj_index;
3897 ecmp_size = fib_entry->nh_group->ecmp_size;
3898 } else {
3899 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3900 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3901 }
3902
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003903 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003904 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3905 adjacency_index, ecmp_size);
3906 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3907}
3908
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003909static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3910 struct mlxsw_sp_fib_entry *fib_entry,
3911 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003912{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003913 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003914 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003915 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003916 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003917 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003918
3919 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3920 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003921 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003922 } else {
3923 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3924 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3925 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003926
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003927 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003928 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3929 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003930 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3931}
3932
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003933static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3934 struct mlxsw_sp_fib_entry *fib_entry,
3935 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003936{
3937 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003938
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003939 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003940 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3941 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3942}
3943
Petr Machata4607f6d2017-09-02 23:49:25 +02003944static int
3945mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3946 struct mlxsw_sp_fib_entry *fib_entry,
3947 enum mlxsw_reg_ralue_op op)
3948{
3949 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3950 const struct mlxsw_sp_ipip_ops *ipip_ops;
3951
3952 if (WARN_ON(!ipip_entry))
3953 return -EINVAL;
3954
3955 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3956 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3957 fib_entry->decap.tunnel_index);
3958}
3959
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003960static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3961 struct mlxsw_sp_fib_entry *fib_entry,
3962 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003963{
3964 switch (fib_entry->type) {
3965 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003966 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003967 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003968 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003969 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003970 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003971 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3972 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3973 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003974 }
3975 return -EINVAL;
3976}
3977
3978static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3979 struct mlxsw_sp_fib_entry *fib_entry,
3980 enum mlxsw_reg_ralue_op op)
3981{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003982 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003983
Ido Schimmel013b20f2017-02-08 11:16:36 +01003984 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003985
Ido Schimmel013b20f2017-02-08 11:16:36 +01003986 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003987}
3988
3989static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3990 struct mlxsw_sp_fib_entry *fib_entry)
3991{
Jiri Pirko7146da32016-09-01 10:37:41 +02003992 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3993 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003994}
3995
3996static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3997 struct mlxsw_sp_fib_entry *fib_entry)
3998{
3999 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
4000 MLXSW_REG_RALUE_OP_WRITE_DELETE);
4001}
4002
Jiri Pirko61c503f2016-07-04 08:23:11 +02004003static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01004004mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4005 const struct fib_entry_notifier_info *fen_info,
4006 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004007{
Petr Machata4607f6d2017-09-02 23:49:25 +02004008 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4009 struct net_device *dev = fen_info->fi->fib_dev;
4010 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004011 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004012
Ido Schimmel97989ee2017-03-10 08:53:38 +01004013 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01004014 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02004015 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4016 MLXSW_SP_L3_PROTO_IPV4, dip);
Petr Machata57c77ce2017-11-28 13:17:11 +01004017 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
Petr Machata4607f6d2017-09-02 23:49:25 +02004018 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4019 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4020 fib_entry,
4021 ipip_entry);
4022 }
4023 /* fall through */
4024 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02004025 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4026 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004027 case RTN_UNREACHABLE: /* fall through */
4028 case RTN_BLACKHOLE: /* fall through */
4029 case RTN_PROHIBIT:
4030 /* Packets hitting these routes need to be trapped, but
4031 * can do so with a lower priority than packets directed
4032 * at the host, so use action type local instead of trap.
4033 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004034 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004035 return 0;
4036 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02004037 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01004038 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02004039 else
4040 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01004041 return 0;
4042 default:
4043 return -EINVAL;
4044 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02004045}
4046
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004047static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004048mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4049 struct mlxsw_sp_fib_node *fib_node,
4050 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02004051{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004052 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02004053 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004054 int err;
4055
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004056 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4057 if (!fib4_entry)
4058 return ERR_PTR(-ENOMEM);
4059 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004060
4061 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4062 if (err)
4063 goto err_fib4_entry_type_set;
4064
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004065 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004066 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004067 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004068
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004069 fib4_entry->prio = fen_info->fi->fib_priority;
4070 fib4_entry->tb_id = fen_info->tb_id;
4071 fib4_entry->type = fen_info->type;
4072 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004073
4074 fib_entry->fib_node = fib_node;
4075
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004076 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004077
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004078err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004080 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004081 return ERR_PTR(err);
4082}
4083
4084static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004085 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004086{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02004087 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004088 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004089}
4090
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004091static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004092mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4093 const struct fib_entry_notifier_info *fen_info)
4094{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004095 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004096 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02004097 struct mlxsw_sp_fib *fib;
4098 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004099
Ido Schimmel160e22a2017-07-18 10:10:20 +02004100 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4101 if (!vr)
4102 return NULL;
4103 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4104
4105 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4106 sizeof(fen_info->dst),
4107 fen_info->dst_len);
4108 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004109 return NULL;
4110
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004111 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4112 if (fib4_entry->tb_id == fen_info->tb_id &&
4113 fib4_entry->tos == fen_info->tos &&
4114 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02004115 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4116 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004117 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004118 }
4119 }
4120
4121 return NULL;
4122}
4123
4124static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4125 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4126 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4127 .key_len = sizeof(struct mlxsw_sp_fib_key),
4128 .automatic_shrinking = true,
4129};
4130
4131static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4132 struct mlxsw_sp_fib_node *fib_node)
4133{
4134 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4135 mlxsw_sp_fib_ht_params);
4136}
4137
4138static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4139 struct mlxsw_sp_fib_node *fib_node)
4140{
4141 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4142 mlxsw_sp_fib_ht_params);
4143}
4144
4145static struct mlxsw_sp_fib_node *
4146mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4147 size_t addr_len, unsigned char prefix_len)
4148{
4149 struct mlxsw_sp_fib_key key;
4150
4151 memset(&key, 0, sizeof(key));
4152 memcpy(key.addr, addr, addr_len);
4153 key.prefix_len = prefix_len;
4154 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4155}
4156
4157static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01004158mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01004159 size_t addr_len, unsigned char prefix_len)
4160{
4161 struct mlxsw_sp_fib_node *fib_node;
4162
4163 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4164 if (!fib_node)
4165 return NULL;
4166
4167 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004168 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004169 memcpy(fib_node->key.addr, addr, addr_len);
4170 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004171
4172 return fib_node;
4173}
4174
4175static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4176{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004177 list_del(&fib_node->list);
4178 WARN_ON(!list_empty(&fib_node->entry_list));
4179 kfree(fib_node);
4180}
4181
4182static bool
4183mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
4184 const struct mlxsw_sp_fib_entry *fib_entry)
4185{
4186 return list_first_entry(&fib_node->entry_list,
4187 struct mlxsw_sp_fib_entry, list) == fib_entry;
4188}
4189
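/* Make sure the FIB is bound to an LPM tree that covers the prefix lengths
 * already in use plus the new node's prefix length, replacing the bound tree
 * with a different one if required.
 */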
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004190static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4191 struct mlxsw_sp_fib *fib,
4192 struct mlxsw_sp_fib_node *fib_node)
4193{
4194 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
4195 struct mlxsw_sp_lpm_tree *lpm_tree;
4196 int err;
4197
4198 /* Since the tree is shared between all virtual routers we must
4199 * make sure it contains all the required prefix lengths. This
4200 * can be computed by either adding the new prefix length to the
4201 * existing prefix usage of a bound tree, or by aggregating the
4202 * prefix lengths across all virtual routers and adding the new
4203 * one as well.
4204 */
4205 if (fib->lpm_tree)
4206 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
4207 &fib->lpm_tree->prefix_usage);
4208 else
4209 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
4210 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4211
4212 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4213 fib->proto);
4214 if (IS_ERR(lpm_tree))
4215 return PTR_ERR(lpm_tree);
4216
4217 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
4218 return 0;
4219
4220 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4221 if (err)
4222 return err;
4223
4224 return 0;
4225}
4226
4227static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4228 struct mlxsw_sp_fib *fib)
4229{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004230 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
4231 return;
4232 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
4233 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
4234 fib->lpm_tree = NULL;
4235}
4236
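/* Track how many nodes use each prefix length so that the FIB's prefix usage
 * only contains lengths with at least one node.
 */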
Ido Schimmel9aecce12017-02-09 10:28:42 +01004237static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
4238{
4239 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004240 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004241
4242 if (fib->prefix_ref_count[prefix_len]++ == 0)
4243 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4244}
4245
4246static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4247{
4248 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004249 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004250
4251 if (--fib->prefix_ref_count[prefix_len] == 0)
4252 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4253}
4254
Ido Schimmel76610eb2017-03-10 08:53:41 +01004255static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4256 struct mlxsw_sp_fib_node *fib_node,
4257 struct mlxsw_sp_fib *fib)
4258{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004259 int err;
4260
4261 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4262 if (err)
4263 return err;
4264 fib_node->fib = fib;
4265
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004266 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
4267 if (err)
4268 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004269
4270 mlxsw_sp_fib_node_prefix_inc(fib_node);
4271
4272 return 0;
4273
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004274err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004275 fib_node->fib = NULL;
4276 mlxsw_sp_fib_node_remove(fib, fib_node);
4277 return err;
4278}
4279
4280static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4281 struct mlxsw_sp_fib_node *fib_node)
4282{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004283 struct mlxsw_sp_fib *fib = fib_node->fib;
4284
4285 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004286 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004287 fib_node->fib = NULL;
4288 mlxsw_sp_fib_node_remove(fib, fib_node);
4289}
4290
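/* Get the FIB node for the given prefix, creating it (and taking a reference
 * on the virtual router) if it does not exist yet.
 */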
Ido Schimmel9aecce12017-02-09 10:28:42 +01004291static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004292mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
4293 size_t addr_len, unsigned char prefix_len,
4294 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004295{
4296 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004297 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02004298 struct mlxsw_sp_vr *vr;
4299 int err;
4300
David Ahernf8fa9b42017-10-18 09:56:56 -07004301 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02004302 if (IS_ERR(vr))
4303 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004304 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004305
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004306 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004307 if (fib_node)
4308 return fib_node;
4309
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004310 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004311 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004312 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004313 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004314 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004315
Ido Schimmel76610eb2017-03-10 08:53:41 +01004316 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4317 if (err)
4318 goto err_fib_node_init;
4319
Ido Schimmel9aecce12017-02-09 10:28:42 +01004320 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004321
Ido Schimmel76610eb2017-03-10 08:53:41 +01004322err_fib_node_init:
4323 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004324err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004325 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004326 return ERR_PTR(err);
4327}
4328
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004329static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4330 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004331{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004332 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004333
Ido Schimmel9aecce12017-02-09 10:28:42 +01004334 if (!list_empty(&fib_node->entry_list))
4335 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004336 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004337 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004338 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004339}
4340
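/* Entries under a node are kept ordered by table ID, TOS and priority so that
 * the first entry in the list is the one that should be offloaded. Find the
 * entry before which a new entry should be inserted.
 */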
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004341static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004342mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004343 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004344{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004345 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004346
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004347 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4348 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004349 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004350 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004351 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004352 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004353 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004354 if (fib4_entry->prio >= new4_entry->prio ||
4355 fib4_entry->tos < new4_entry->tos)
4356 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004357 }
4358
4359 return NULL;
4360}
4361
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004362static int
4363mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4364 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004365{
4366 struct mlxsw_sp_fib_node *fib_node;
4367
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004368 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004369 return -EINVAL;
4370
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004371 fib_node = fib4_entry->common.fib_node;
4372 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4373 common.list) {
4374 if (fib4_entry->tb_id != new4_entry->tb_id ||
4375 fib4_entry->tos != new4_entry->tos ||
4376 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004377 break;
4378 }
4379
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004380 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004381 return 0;
4382}
4383
Ido Schimmel9aecce12017-02-09 10:28:42 +01004384static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004385mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004386 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004387{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004388 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004389 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004390
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004391 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004392
Ido Schimmel4283bce2017-02-09 10:28:43 +01004393 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004394 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4395 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004396 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004397
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004398	/* Insert the new entry before the replaced one, so that the
 4399	 * replaced one can later be removed.
4400 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004401 if (fib4_entry) {
4402 list_add_tail(&new4_entry->common.list,
4403 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004404 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004405 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004406
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004407 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4408 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004409 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004410 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004411 }
4412
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004413 if (fib4_entry)
4414 list_add(&new4_entry->common.list,
4415 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004416 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004417 list_add(&new4_entry->common.list,
4418 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004419 }
4420
4421 return 0;
4422}
4423
4424static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004425mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004426{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004427 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004428}
4429
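/* Only the first entry under a node is reflected to the device. When a new
 * first entry is added it overwrites the previously offloaded one, whose
 * offload indication is cleared.
 */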
Ido Schimmel80c238f2017-07-18 10:10:29 +02004430static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4431 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004432{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004433 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4434
Ido Schimmel9aecce12017-02-09 10:28:42 +01004435 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4436 return 0;
4437
4438 /* To prevent packet loss, overwrite the previously offloaded
4439 * entry.
4440 */
4441 if (!list_is_singular(&fib_node->entry_list)) {
4442 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4443 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4444
4445 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4446 }
4447
4448 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4449}
4450
Ido Schimmel80c238f2017-07-18 10:10:29 +02004451static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4452 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004453{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004454 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4455
Ido Schimmel9aecce12017-02-09 10:28:42 +01004456 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4457 return;
4458
4459 /* Promote the next entry by overwriting the deleted entry */
4460 if (!list_is_singular(&fib_node->entry_list)) {
4461 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4462 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4463
4464 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4465 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4466 return;
4467 }
4468
4469 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4470}
4471
4472static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004473 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004474 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004475{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004476 int err;
4477
Ido Schimmel9efbee62017-07-18 10:10:28 +02004478 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004479 if (err)
4480 return err;
4481
Ido Schimmel80c238f2017-07-18 10:10:29 +02004482 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004483 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004484 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004485
Ido Schimmel9aecce12017-02-09 10:28:42 +01004486 return 0;
4487
Ido Schimmel80c238f2017-07-18 10:10:29 +02004488err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004489 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004490 return err;
4491}
4492
4493static void
4494mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004495 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004496{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004497 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004498 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004499
4500 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4501 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004502}
4503
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004504static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004505 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004506 bool replace)
4507{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004508 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4509 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004510
4511 if (!replace)
4512 return;
4513
 4514	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004515 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004516
4517 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4518 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004519 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004520}
4521
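/* Handle an IPv4 route add/replace/append notification: get the FIB node for
 * the route's prefix, create a fib4 entry, link it into the node and, in case
 * of replace, remove the entry it replaced.
 */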
Ido Schimmel9aecce12017-02-09 10:28:42 +01004522static int
4523mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004524 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004525 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004526{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004527 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004528 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004529 int err;
4530
Ido Schimmel9011b672017-05-16 19:38:25 +02004531 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004532 return 0;
4533
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004534 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4535 &fen_info->dst, sizeof(fen_info->dst),
4536 fen_info->dst_len,
4537 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004538 if (IS_ERR(fib_node)) {
4539 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4540 return PTR_ERR(fib_node);
4541 }
4542
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004543 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4544 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004545 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004546 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004547 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004548 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004549
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004550 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004551 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004552 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004553 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4554 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004555 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004556
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004557 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004558
Jiri Pirko61c503f2016-07-04 08:23:11 +02004559 return 0;
4560
Ido Schimmel9aecce12017-02-09 10:28:42 +01004561err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004562 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004563err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004564 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004565 return err;
4566}
4567
Jiri Pirko37956d72016-10-20 16:05:43 +02004568static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4569 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004570{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004571 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004572 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004573
Ido Schimmel9011b672017-05-16 19:38:25 +02004574 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004575 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004576
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004577 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4578 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004579 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004580 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004581
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004582 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4583 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004584 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004585}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004586
Ido Schimmel428b8512017-08-03 13:28:28 +02004587static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4588{
 4589	/* Packets with a link-local destination IP arriving at the router
4590 * are trapped to the CPU, so no need to program specific routes
4591 * for them.
4592 */
4593 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4594 return true;
4595
4596 /* Multicast routes aren't supported, so ignore them. Neighbour
4597 * Discovery packets are specifically trapped.
4598 */
4599 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4600 return true;
4601
4602 /* Cloned routes are irrelevant in the forwarding path. */
4603 if (rt->rt6i_flags & RTF_CACHE)
4604 return true;
4605
4606 return false;
4607}
4608
4609static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4610{
4611 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4612
4613 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4614 if (!mlxsw_sp_rt6)
4615 return ERR_PTR(-ENOMEM);
4616
 4617	/* In case of route replacement, the replaced route is deleted
 4618	 * without notification. Take a reference to prevent accessing
 4619	 * freed memory.
4620 */
4621 mlxsw_sp_rt6->rt = rt;
4622 rt6_hold(rt);
4623
4624 return mlxsw_sp_rt6;
4625}
4626
4627#if IS_ENABLED(CONFIG_IPV6)
4628static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4629{
4630 rt6_release(rt);
4631}
4632#else
4633static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4634{
4635}
4636#endif
4637
4638static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4639{
4640 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4641 kfree(mlxsw_sp_rt6);
4642}
4643
4644static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4645{
 4646	/* Only gateway routes that were not created by address
	 * auto-configuration can be part of a multipath entry.
	 * RTF_CACHE routes were already ignored.
	 */
4647 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4648}
4649
4650static struct rt6_info *
4651mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4652{
4653 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4654 list)->rt;
4655}
4656
4657static struct mlxsw_sp_fib6_entry *
4658mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004659 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004660{
4661 struct mlxsw_sp_fib6_entry *fib6_entry;
4662
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004663 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004664 return NULL;
4665
4666 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4667 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4668
4669 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4670 * virtual router.
4671 */
4672 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4673 continue;
4674 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4675 break;
4676 if (rt->rt6i_metric < nrt->rt6i_metric)
4677 continue;
4678 if (rt->rt6i_metric == nrt->rt6i_metric &&
4679 mlxsw_sp_fib6_rt_can_mp(rt))
4680 return fib6_entry;
4681 if (rt->rt6i_metric > nrt->rt6i_metric)
4682 break;
4683 }
4684
4685 return NULL;
4686}
4687
4688static struct mlxsw_sp_rt6 *
4689mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4690 const struct rt6_info *rt)
4691{
4692 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4693
4694 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4695 if (mlxsw_sp_rt6->rt == rt)
4696 return mlxsw_sp_rt6;
4697 }
4698
4699 return NULL;
4700}
4701
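/* Check whether the route's nexthop device is an IP-in-IP tunnel recognized
 * by the driver and, if so, report the tunnel type.
 */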
Petr Machata8f28a302017-09-02 23:49:24 +02004702static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4703 const struct rt6_info *rt,
4704 enum mlxsw_sp_ipip_type *ret)
4705{
4706 return rt->dst.dev &&
4707 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4708}
4709
Petr Machata35225e42017-09-02 23:49:22 +02004710static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4711 struct mlxsw_sp_nexthop_group *nh_grp,
4712 struct mlxsw_sp_nexthop *nh,
4713 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004714{
Petr Machata8f28a302017-09-02 23:49:24 +02004715 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004716 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004717 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004718 struct mlxsw_sp_rif *rif;
4719 int err;
4720
Petr Machata8f28a302017-09-02 23:49:24 +02004721 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4722 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4723 MLXSW_SP_L3_PROTO_IPV6)) {
4724 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004725 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004726 if (err)
4727 return err;
4728 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4729 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004730 }
4731
Petr Machata35225e42017-09-02 23:49:22 +02004732 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004733 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4734 if (!rif)
4735 return 0;
4736 mlxsw_sp_nexthop_rif_init(nh, rif);
4737
4738 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4739 if (err)
4740 goto err_nexthop_neigh_init;
4741
4742 return 0;
4743
4744err_nexthop_neigh_init:
4745 mlxsw_sp_nexthop_rif_fini(nh);
4746 return err;
4747}
4748
Petr Machata35225e42017-09-02 23:49:22 +02004749static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4750 struct mlxsw_sp_nexthop *nh)
4751{
4752 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4753}
4754
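/* Initialize a nexthop from an IPv6 route: record its gateway address,
 * allocate a nexthop counter and resolve the nexthop to either a router
 * interface or an IP-in-IP tunnel.
 */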
4755static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4756 struct mlxsw_sp_nexthop_group *nh_grp,
4757 struct mlxsw_sp_nexthop *nh,
4758 const struct rt6_info *rt)
4759{
4760 struct net_device *dev = rt->dst.dev;
4761
4762 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004763 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004764 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004765 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004766
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004767 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4768
Petr Machata35225e42017-09-02 23:49:22 +02004769 if (!dev)
4770 return 0;
4771 nh->ifindex = dev->ifindex;
4772
4773 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4774}
4775
Ido Schimmel428b8512017-08-03 13:28:28 +02004776static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4777 struct mlxsw_sp_nexthop *nh)
4778{
Petr Machata35225e42017-09-02 23:49:22 +02004779 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004780 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004781 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004782}
4783
Petr Machataf6050ee2017-09-02 23:49:21 +02004784static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4785 const struct rt6_info *rt)
4786{
Petr Machata8f28a302017-09-02 23:49:24 +02004787 return rt->rt6i_flags & RTF_GATEWAY ||
4788 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004789}
4790
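/* Create a nexthop group covering all routes currently linked to the fib6
 * entry. The nexthops are allocated inline at the end of the group, and the
 * group is inserted into the driver's nexthop group table so it can be
 * shared with other entries.
 */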
Ido Schimmel428b8512017-08-03 13:28:28 +02004791static struct mlxsw_sp_nexthop_group *
4792mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4793 struct mlxsw_sp_fib6_entry *fib6_entry)
4794{
4795 struct mlxsw_sp_nexthop_group *nh_grp;
4796 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4797 struct mlxsw_sp_nexthop *nh;
4798 size_t alloc_size;
4799 int i = 0;
4800 int err;
4801
4802 alloc_size = sizeof(*nh_grp) +
4803 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4804 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4805 if (!nh_grp)
4806 return ERR_PTR(-ENOMEM);
4807 INIT_LIST_HEAD(&nh_grp->fib_list);
4808#if IS_ENABLED(CONFIG_IPV6)
4809 nh_grp->neigh_tbl = &nd_tbl;
4810#endif
4811 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4812 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004813 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004814 nh_grp->count = fib6_entry->nrt6;
4815 for (i = 0; i < nh_grp->count; i++) {
4816 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4817
4818 nh = &nh_grp->nexthops[i];
4819 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4820 if (err)
4821 goto err_nexthop6_init;
4822 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4823 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004824
4825 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4826 if (err)
4827 goto err_nexthop_group_insert;
4828
Ido Schimmel428b8512017-08-03 13:28:28 +02004829 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4830 return nh_grp;
4831
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004832err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004833err_nexthop6_init:
4834 for (i--; i >= 0; i--) {
4835 nh = &nh_grp->nexthops[i];
4836 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4837 }
4838 kfree(nh_grp);
4839 return ERR_PTR(err);
4840}
4841
4842static void
4843mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4844 struct mlxsw_sp_nexthop_group *nh_grp)
4845{
4846 struct mlxsw_sp_nexthop *nh;
4847 int i = nh_grp->count;
4848
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004849 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004850 for (i--; i >= 0; i--) {
4851 nh = &nh_grp->nexthops[i];
4852 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4853 }
4854 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4855 WARN_ON(nh_grp->adj_index_valid);
4856 kfree(nh_grp);
4857}
4858
4859static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4860 struct mlxsw_sp_fib6_entry *fib6_entry)
4861{
4862 struct mlxsw_sp_nexthop_group *nh_grp;
4863
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004864 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4865 if (!nh_grp) {
4866 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4867 if (IS_ERR(nh_grp))
4868 return PTR_ERR(nh_grp);
4869 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004870
4871 list_add_tail(&fib6_entry->common.nexthop_group_node,
4872 &nh_grp->fib_list);
4873 fib6_entry->common.nh_group = nh_grp;
4874
4875 return 0;
4876}
4877
4878static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4879 struct mlxsw_sp_fib_entry *fib_entry)
4880{
4881 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4882
4883 list_del(&fib_entry->nexthop_group_node);
4884 if (!list_empty(&nh_grp->fib_list))
4885 return;
4886 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4887}
4888
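/* Switch the fib6 entry to a nexthop group that matches its current set of
 * routes, update the device accordingly and destroy the old group if it is
 * no longer used.
 */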
4889static int
4890mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4891 struct mlxsw_sp_fib6_entry *fib6_entry)
4892{
4893 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4894 int err;
4895
4896 fib6_entry->common.nh_group = NULL;
4897 list_del(&fib6_entry->common.nexthop_group_node);
4898
4899 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4900 if (err)
4901 goto err_nexthop6_group_get;
4902
 4903	/* If this entry is offloaded, then the adjacency index
4904 * currently associated with it in the device's table is that
4905 * of the old group. Start using the new one instead.
4906 */
4907 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4908 if (err)
4909 goto err_fib_node_entry_add;
4910
4911 if (list_empty(&old_nh_grp->fib_list))
4912 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4913
4914 return 0;
4915
4916err_fib_node_entry_add:
4917 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4918err_nexthop6_group_get:
4919 list_add_tail(&fib6_entry->common.nexthop_group_node,
4920 &old_nh_grp->fib_list);
4921 fib6_entry->common.nh_group = old_nh_grp;
4922 return err;
4923}
4924
4925static int
4926mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4927 struct mlxsw_sp_fib6_entry *fib6_entry,
4928 struct rt6_info *rt)
4929{
4930 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4931 int err;
4932
4933 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4934 if (IS_ERR(mlxsw_sp_rt6))
4935 return PTR_ERR(mlxsw_sp_rt6);
4936
4937 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4938 fib6_entry->nrt6++;
4939
4940 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4941 if (err)
4942 goto err_nexthop6_group_update;
4943
4944 return 0;
4945
4946err_nexthop6_group_update:
4947 fib6_entry->nrt6--;
4948 list_del(&mlxsw_sp_rt6->list);
4949 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4950 return err;
4951}
4952
4953static void
4954mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4955 struct mlxsw_sp_fib6_entry *fib6_entry,
4956 struct rt6_info *rt)
4957{
4958 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4959
4960 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4961 if (WARN_ON(!mlxsw_sp_rt6))
4962 return;
4963
4964 fib6_entry->nrt6--;
4965 list_del(&mlxsw_sp_rt6->list);
4966 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4967 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4968}
4969
Petr Machataf6050ee2017-09-02 23:49:21 +02004970static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4971 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004972 const struct rt6_info *rt)
4973{
4974 /* Packets hitting RTF_REJECT routes need to be discarded by the
4975 * stack. We can rely on their destination device not having a
4976 * RIF (it's the loopback device) and can thus use action type
4977 * local, which will cause them to be trapped with a lower
4978 * priority than packets that need to be locally received.
4979 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004980 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004981 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4982 else if (rt->rt6i_flags & RTF_REJECT)
4983 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004984 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004985 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4986 else
4987 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4988}
4989
4990static void
4991mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4992{
4993 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4994
4995 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4996 list) {
4997 fib6_entry->nrt6--;
4998 list_del(&mlxsw_sp_rt6->list);
4999 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5000 }
5001}
5002
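/* Create a fib6 entry for a single route: take a reference on the route,
 * derive the entry type from its flags and get a matching nexthop group.
 */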
5003static struct mlxsw_sp_fib6_entry *
5004mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5005 struct mlxsw_sp_fib_node *fib_node,
5006 struct rt6_info *rt)
5007{
5008 struct mlxsw_sp_fib6_entry *fib6_entry;
5009 struct mlxsw_sp_fib_entry *fib_entry;
5010 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5011 int err;
5012
5013 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5014 if (!fib6_entry)
5015 return ERR_PTR(-ENOMEM);
5016 fib_entry = &fib6_entry->common;
5017
5018 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
5019 if (IS_ERR(mlxsw_sp_rt6)) {
5020 err = PTR_ERR(mlxsw_sp_rt6);
5021 goto err_rt6_create;
5022 }
5023
Petr Machataf6050ee2017-09-02 23:49:21 +02005024 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02005025
5026 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5027 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5028 fib6_entry->nrt6 = 1;
5029 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5030 if (err)
5031 goto err_nexthop6_group_get;
5032
5033 fib_entry->fib_node = fib_node;
5034
5035 return fib6_entry;
5036
5037err_nexthop6_group_get:
5038 list_del(&mlxsw_sp_rt6->list);
5039 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5040err_rt6_create:
5041 kfree(fib6_entry);
5042 return ERR_PTR(err);
5043}
5044
5045static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5046 struct mlxsw_sp_fib6_entry *fib6_entry)
5047{
5048 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5049 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5050 WARN_ON(fib6_entry->nrt6);
5051 kfree(fib6_entry);
5052}
5053
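/* Find the entry before which the new route should be inserted. Entries
 * under a node are ordered by table ID and metric. When replacing, prefer an
 * entry with the same metric, ideally one with matching multipath capability.
 */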
5054static struct mlxsw_sp_fib6_entry *
5055mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005056 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005057{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005058 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005059
5060 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5061 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5062
5063 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
5064 continue;
5065 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
5066 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005067 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
5068 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
5069 mlxsw_sp_fib6_rt_can_mp(nrt))
5070 return fib6_entry;
5071 if (mlxsw_sp_fib6_rt_can_mp(nrt))
5072 fallback = fallback ?: fib6_entry;
5073 }
Ido Schimmel428b8512017-08-03 13:28:28 +02005074 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005075 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02005076 }
5077
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005078 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02005079}
5080
5081static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005082mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
5083 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005084{
5085 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
5086 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
5087 struct mlxsw_sp_fib6_entry *fib6_entry;
5088
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005089 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
5090
5091 if (replace && WARN_ON(!fib6_entry))
5092 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02005093
5094 if (fib6_entry) {
5095 list_add_tail(&new6_entry->common.list,
5096 &fib6_entry->common.list);
5097 } else {
5098 struct mlxsw_sp_fib6_entry *last;
5099
5100 list_for_each_entry(last, &fib_node->entry_list, common.list) {
5101 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
5102
5103 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
5104 break;
5105 fib6_entry = last;
5106 }
5107
5108 if (fib6_entry)
5109 list_add(&new6_entry->common.list,
5110 &fib6_entry->common.list);
5111 else
5112 list_add(&new6_entry->common.list,
5113 &fib_node->entry_list);
5114 }
5115
5116 return 0;
5117}
5118
5119static void
5120mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
5121{
5122 list_del(&fib6_entry->common.list);
5123}
5124
5125static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005126 struct mlxsw_sp_fib6_entry *fib6_entry,
5127 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005128{
5129 int err;
5130
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005131 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005132 if (err)
5133 return err;
5134
5135 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
5136 if (err)
5137 goto err_fib_node_entry_add;
5138
5139 return 0;
5140
5141err_fib_node_entry_add:
5142 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5143 return err;
5144}
5145
5146static void
5147mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5148 struct mlxsw_sp_fib6_entry *fib6_entry)
5149{
5150 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
5151 mlxsw_sp_fib6_node_list_remove(fib6_entry);
5152}
5153
5154static struct mlxsw_sp_fib6_entry *
5155mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5156 const struct rt6_info *rt)
5157{
5158 struct mlxsw_sp_fib6_entry *fib6_entry;
5159 struct mlxsw_sp_fib_node *fib_node;
5160 struct mlxsw_sp_fib *fib;
5161 struct mlxsw_sp_vr *vr;
5162
5163 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
5164 if (!vr)
5165 return NULL;
5166 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5167
5168 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
5169 sizeof(rt->rt6i_dst.addr),
5170 rt->rt6i_dst.plen);
5171 if (!fib_node)
5172 return NULL;
5173
5174 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
5175 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5176
5177 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
5178 rt->rt6i_metric == iter_rt->rt6i_metric &&
5179 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5180 return fib6_entry;
5181 }
5182
5183 return NULL;
5184}
5185
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005186static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
5187 struct mlxsw_sp_fib6_entry *fib6_entry,
5188 bool replace)
5189{
5190 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5191 struct mlxsw_sp_fib6_entry *replaced;
5192
5193 if (!replace)
5194 return;
5195
5196 replaced = list_next_entry(fib6_entry, common.list);
5197
5198 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
5199 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
5200 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5201}
5202
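/* Handle an IPv6 route add/replace notification. Routes with a source
 * prefix are not supported. When possible, the route is appended to an
 * existing multipath entry instead of creating a new one.
 */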
Ido Schimmel428b8512017-08-03 13:28:28 +02005203static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005204 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02005205{
5206 struct mlxsw_sp_fib6_entry *fib6_entry;
5207 struct mlxsw_sp_fib_node *fib_node;
5208 int err;
5209
5210 if (mlxsw_sp->router->aborted)
5211 return 0;
5212
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02005213 if (rt->rt6i_src.plen)
5214 return -EINVAL;
5215
Ido Schimmel428b8512017-08-03 13:28:28 +02005216 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5217 return 0;
5218
5219 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
5220 &rt->rt6i_dst.addr,
5221 sizeof(rt->rt6i_dst.addr),
5222 rt->rt6i_dst.plen,
5223 MLXSW_SP_L3_PROTO_IPV6);
5224 if (IS_ERR(fib_node))
5225 return PTR_ERR(fib_node);
5226
 5227	/* Before creating a new entry, try to append the route to an
 5228	 * existing multipath entry.
5229 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005230 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005231 if (fib6_entry) {
5232 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
5233 if (err)
5234 goto err_fib6_entry_nexthop_add;
5235 return 0;
5236 }
5237
5238 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
5239 if (IS_ERR(fib6_entry)) {
5240 err = PTR_ERR(fib6_entry);
5241 goto err_fib6_entry_create;
5242 }
5243
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005244 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005245 if (err)
5246 goto err_fib6_node_entry_link;
5247
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005248 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
5249
Ido Schimmel428b8512017-08-03 13:28:28 +02005250 return 0;
5251
5252err_fib6_node_entry_link:
5253 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5254err_fib6_entry_create:
5255err_fib6_entry_nexthop_add:
5256 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5257 return err;
5258}
5259
5260static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5261 struct rt6_info *rt)
5262{
5263 struct mlxsw_sp_fib6_entry *fib6_entry;
5264 struct mlxsw_sp_fib_node *fib_node;
5265
5266 if (mlxsw_sp->router->aborted)
5267 return;
5268
5269 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5270 return;
5271
5272 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5273 if (WARN_ON(!fib6_entry))
5274 return;
5275
 5276	/* If the route is part of a multipath entry, but is not the last
 5277	 * one being removed, then only remove its nexthop from the group.
5278 */
5279 if (!list_is_singular(&fib6_entry->rt6_list)) {
5280 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
5281 return;
5282 }
5283
5284 fib_node = fib6_entry->common.fib_node;
5285
5286 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5287 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5288 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5289}
5290
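/* Bind all virtual routers to a minimal LPM tree and program a catch-all
 * default route with an IP2ME action, so that packets are handed to the CPU
 * after FIB offload has been aborted.
 */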
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005291static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5292 enum mlxsw_reg_ralxx_protocol proto,
5293 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005294{
5295 char ralta_pl[MLXSW_REG_RALTA_LEN];
5296 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005297 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005298
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005299 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005300 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
5301 if (err)
5302 return err;
5303
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005304 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005305 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5306 if (err)
5307 return err;
5308
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005309 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005310 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005311 char raltb_pl[MLXSW_REG_RALTB_LEN];
5312 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005313
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005314 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005315 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5316 raltb_pl);
5317 if (err)
5318 return err;
5319
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005320 mlxsw_reg_ralue_pack(ralue_pl, proto,
5321 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005322 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5323 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5324 ralue_pl);
5325 if (err)
5326 return err;
5327 }
5328
5329 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005330}
5331
Yotam Gigid42b0962017-09-27 08:23:20 +02005332static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5333 struct mfc_entry_notifier_info *men_info,
5334 bool replace)
5335{
5336 struct mlxsw_sp_vr *vr;
5337
5338 if (mlxsw_sp->router->aborted)
5339 return 0;
5340
David Ahernf8fa9b42017-10-18 09:56:56 -07005341 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005342 if (IS_ERR(vr))
5343 return PTR_ERR(vr);
5344
5345 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5346}
5347
5348static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5349 struct mfc_entry_notifier_info *men_info)
5350{
5351 struct mlxsw_sp_vr *vr;
5352
5353 if (mlxsw_sp->router->aborted)
5354 return;
5355
5356 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5357 if (WARN_ON(!vr))
5358 return;
5359
5360 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5361 mlxsw_sp_vr_put(vr);
5362}
5363
5364static int
5365mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5366 struct vif_entry_notifier_info *ven_info)
5367{
5368 struct mlxsw_sp_rif *rif;
5369 struct mlxsw_sp_vr *vr;
5370
5371 if (mlxsw_sp->router->aborted)
5372 return 0;
5373
David Ahernf8fa9b42017-10-18 09:56:56 -07005374 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005375 if (IS_ERR(vr))
5376 return PTR_ERR(vr);
5377
5378 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5379 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5380 ven_info->vif_index,
5381 ven_info->vif_flags, rif);
5382}
5383
5384static void
5385mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5386 struct vif_entry_notifier_info *ven_info)
5387{
5388 struct mlxsw_sp_vr *vr;
5389
5390 if (mlxsw_sp->router->aborted)
5391 return;
5392
5393 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5394 if (WARN_ON(!vr))
5395 return;
5396
5397 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5398 mlxsw_sp_vr_put(vr);
5399}
5400
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005401static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5402{
5403 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5404 int err;
5405
5406 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5407 MLXSW_SP_LPM_TREE_MIN);
5408 if (err)
5409 return err;
5410
Yotam Gigid42b0962017-09-27 08:23:20 +02005411	/* The multicast router code does not need an abort trap, since by
 5412	 * default packets that do not match any route are trapped to the CPU.
5413 */
5414
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005415 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5416 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5417 MLXSW_SP_LPM_TREE_MIN + 1);
5418}
5419
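/* Unlink and destroy all IPv4 entries under a node; the node itself (and
 * possibly its virtual router) is released together with its last entry.
 */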
Ido Schimmel9aecce12017-02-09 10:28:42 +01005420static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5421 struct mlxsw_sp_fib_node *fib_node)
5422{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005423 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005424
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005425 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5426 common.list) {
5427 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005428
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005429 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5430 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005431 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005432	/* Break when the entry list is empty and the node has been
 5433	 * freed. Otherwise, we would access freed memory in the next
 5434	 * iteration.
5435 */
5436 if (do_break)
5437 break;
5438 }
5439}
5440
Ido Schimmel428b8512017-08-03 13:28:28 +02005441static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5442 struct mlxsw_sp_fib_node *fib_node)
5443{
5444 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5445
5446 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5447 common.list) {
5448 bool do_break = &tmp->common.list == &fib_node->entry_list;
5449
5450 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5451 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5452 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5453 if (do_break)
5454 break;
5455 }
5456}
5457
Ido Schimmel9aecce12017-02-09 10:28:42 +01005458static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5459 struct mlxsw_sp_fib_node *fib_node)
5460{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005461 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005462 case MLXSW_SP_L3_PROTO_IPV4:
5463 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5464 break;
5465 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005466 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005467 break;
5468 }
5469}
5470
Ido Schimmel76610eb2017-03-10 08:53:41 +01005471static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5472 struct mlxsw_sp_vr *vr,
5473 enum mlxsw_sp_l3proto proto)
5474{
5475 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5476 struct mlxsw_sp_fib_node *fib_node, *tmp;
5477
5478 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5479 bool do_break = &tmp->list == &fib->node_list;
5480
5481 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5482 if (do_break)
5483 break;
5484 }
5485}
5486
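/* Flush the multicast, IPv4 and IPv6 tables of every virtual router in use.
 * Called when FIB offload is aborted.
 */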
Ido Schimmelac571de2016-11-14 11:26:32 +01005487static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005488{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005489 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005490
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005491 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005492 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005493
Ido Schimmel76610eb2017-03-10 08:53:41 +01005494 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005495 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005496
5497 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005498 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005499
 5500	/* If the virtual router was only used for IPv4, then it is no
 5501	 * longer in use.
5502 */
5503 if (!mlxsw_sp_vr_is_used(vr))
5504 continue;
5505 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005506 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005507}
5508
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005509static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005510{
5511 int err;
5512
Ido Schimmel9011b672017-05-16 19:38:25 +02005513 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005514 return;
5515 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005516 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005517 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005518 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5519 if (err)
5520 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5521}
5522
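/* FIB events are processed asynchronously: the notifier copies the event
 * information into a work item and the actual handling is done in process
 * context under RTNL.
 */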
Ido Schimmel30572242016-12-03 16:45:01 +01005523struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005524 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005525 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005526 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005527 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005528 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005529 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005530 struct mfc_entry_notifier_info men_info;
5531 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005532 };
Ido Schimmel30572242016-12-03 16:45:01 +01005533 struct mlxsw_sp *mlxsw_sp;
5534 unsigned long event;
5535};
5536
Ido Schimmel66a57632017-08-03 13:28:26 +02005537static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005538{
Ido Schimmel30572242016-12-03 16:45:01 +01005539 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005540 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005541 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005542 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005543 int err;
5544
Ido Schimmel30572242016-12-03 16:45:01 +01005545 /* Protect internal structures from changes */
5546 rtnl_lock();
5547 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005548 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005549 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005550 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005551 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005552 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5553 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005554 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005555 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005556 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005557 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005558 break;
5559 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005560 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5561 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005562 break;
David Ahern1f279232017-10-27 17:37:14 -07005563 case FIB_EVENT_RULE_ADD:
5564		/* If we get here, a rule was added that we do not support,
5565		 * so trigger the FIB abort.
5566		 */
5567 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005568 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005569 case FIB_EVENT_NH_ADD: /* fall through */
5570 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005571 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5572 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005573 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5574 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005575 }
Ido Schimmel30572242016-12-03 16:45:01 +01005576 rtnl_unlock();
5577 kfree(fib_work);
5578}
5579
Ido Schimmel66a57632017-08-03 13:28:26 +02005580static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5581{
Ido Schimmel583419f2017-08-03 13:28:27 +02005582 struct mlxsw_sp_fib_event_work *fib_work =
5583 container_of(work, struct mlxsw_sp_fib_event_work, work);
5584 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005585 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005586 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005587
5588 rtnl_lock();
5589 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005590 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005591 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005592 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005593 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005594 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005595 if (err)
5596 mlxsw_sp_router_fib_abort(mlxsw_sp);
5597 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5598 break;
5599 case FIB_EVENT_ENTRY_DEL:
5600 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5601 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5602 break;
David Ahern1f279232017-10-27 17:37:14 -07005603 case FIB_EVENT_RULE_ADD:
5604		/* If we get here, a rule was added that we do not support,
5605		 * so trigger the FIB abort.
5606		 */
5607 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005608 break;
5609 }
5610 rtnl_unlock();
5611 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005612}
5613
Yotam Gigid42b0962017-09-27 08:23:20 +02005614static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5615{
5616 struct mlxsw_sp_fib_event_work *fib_work =
5617 container_of(work, struct mlxsw_sp_fib_event_work, work);
5618 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005619 bool replace;
5620 int err;
5621
5622 rtnl_lock();
5623 switch (fib_work->event) {
5624 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5625 case FIB_EVENT_ENTRY_ADD:
5626 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5627
5628 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5629 replace);
5630 if (err)
5631 mlxsw_sp_router_fib_abort(mlxsw_sp);
5632 ipmr_cache_put(fib_work->men_info.mfc);
5633 break;
5634 case FIB_EVENT_ENTRY_DEL:
5635 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5636 ipmr_cache_put(fib_work->men_info.mfc);
5637 break;
5638 case FIB_EVENT_VIF_ADD:
5639 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5640 &fib_work->ven_info);
5641 if (err)
5642 mlxsw_sp_router_fib_abort(mlxsw_sp);
5643 dev_put(fib_work->ven_info.dev);
5644 break;
5645 case FIB_EVENT_VIF_DEL:
5646 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5647 &fib_work->ven_info);
5648 dev_put(fib_work->ven_info.dev);
5649 break;
David Ahern1f279232017-10-27 17:37:14 -07005650 case FIB_EVENT_RULE_ADD:
5651		/* If we get here, a rule was added that we do not support,
5652		 * so trigger the FIB abort.
5653		 */
5654 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005655 break;
5656 }
5657 rtnl_unlock();
5658 kfree(fib_work);
5659}
5660
Ido Schimmel66a57632017-08-03 13:28:26 +02005661static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5662 struct fib_notifier_info *info)
5663{
David Ahern3c75f9b2017-10-18 15:01:38 -07005664 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005665 struct fib_nh_notifier_info *fnh_info;
5666
Ido Schimmel66a57632017-08-03 13:28:26 +02005667 switch (fib_work->event) {
5668 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5669 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5670 case FIB_EVENT_ENTRY_ADD: /* fall through */
5671 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005672 fen_info = container_of(info, struct fib_entry_notifier_info,
5673 info);
5674 fib_work->fen_info = *fen_info;
5675 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005676 * freed while work is queued. Release it afterwards.
5677 */
5678 fib_info_hold(fib_work->fen_info.fi);
5679 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005680 case FIB_EVENT_NH_ADD: /* fall through */
5681 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005682 fnh_info = container_of(info, struct fib_nh_notifier_info,
5683 info);
5684 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005685 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5686 break;
5687 }
5688}
5689
5690static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5691 struct fib_notifier_info *info)
5692{
David Ahern3c75f9b2017-10-18 15:01:38 -07005693 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005694
Ido Schimmel583419f2017-08-03 13:28:27 +02005695 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005696 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005697 case FIB_EVENT_ENTRY_ADD: /* fall through */
5698 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005699 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5700 info);
5701 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005702 rt6_hold(fib_work->fen6_info.rt);
5703 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005704 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005705}
5706
Yotam Gigid42b0962017-09-27 08:23:20 +02005707static void
5708mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5709 struct fib_notifier_info *info)
5710{
5711 switch (fib_work->event) {
5712 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5713 case FIB_EVENT_ENTRY_ADD: /* fall through */
5714 case FIB_EVENT_ENTRY_DEL:
5715 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5716 ipmr_cache_hold(fib_work->men_info.mfc);
5717 break;
5718 case FIB_EVENT_VIF_ADD: /* fall through */
5719 case FIB_EVENT_VIF_DEL:
5720 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5721 dev_hold(fib_work->ven_info.dev);
5722 break;
David Ahern1f279232017-10-27 17:37:14 -07005723 }
5724}
5725
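/* FIB rules are not offloaded to the device. Default rules and l3mdev (VRF)
 * rules are tolerated; any other rule would make the hardware diverge from
 * the kernel FIB, so offload is aborted when such a rule is installed.
 */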
5726static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5727 struct fib_notifier_info *info,
5728 struct mlxsw_sp *mlxsw_sp)
5729{
5730 struct netlink_ext_ack *extack = info->extack;
5731 struct fib_rule_notifier_info *fr_info;
5732 struct fib_rule *rule;
5733 int err = 0;
5734
5735	/* Nothing to do for rule deletion at the moment. */
5736 if (event == FIB_EVENT_RULE_DEL)
5737 return 0;
5738
5739 if (mlxsw_sp->router->aborted)
5740 return 0;
5741
5742 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5743 rule = fr_info->rule;
5744
5745 switch (info->family) {
5746 case AF_INET:
5747 if (!fib4_rule_default(rule) && !rule->l3mdev)
5748 err = -1;
5749 break;
5750 case AF_INET6:
5751 if (!fib6_rule_default(rule) && !rule->l3mdev)
5752 err = -1;
5753 break;
5754 case RTNL_FAMILY_IPMR:
5755 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5756 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005757 break;
5758 }
David Ahern1f279232017-10-27 17:37:14 -07005759
5760 if (err < 0)
5761 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5762
5763 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005764}
5765
Ido Schimmel30572242016-12-03 16:45:01 +01005766/* Called with rcu_read_lock() */
5767static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5768 unsigned long event, void *ptr)
5769{
Ido Schimmel30572242016-12-03 16:45:01 +01005770 struct mlxsw_sp_fib_event_work *fib_work;
5771 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005772 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005773 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005774
Ido Schimmel8e29f972017-09-15 15:31:07 +02005775 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005776 (info->family != AF_INET && info->family != AF_INET6 &&
5777 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005778 return NOTIFY_DONE;
5779
David Ahern1f279232017-10-27 17:37:14 -07005780 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5781
5782 switch (event) {
5783 case FIB_EVENT_RULE_ADD: /* fall through */
5784 case FIB_EVENT_RULE_DEL:
5785 err = mlxsw_sp_router_fib_rule_event(event, info,
5786 router->mlxsw_sp);
5787 if (!err)
5788 return NOTIFY_DONE;
5789 }
5790
Ido Schimmel30572242016-12-03 16:45:01 +01005791 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5792 if (WARN_ON(!fib_work))
5793 return NOTIFY_BAD;
5794
Ido Schimmel7e39d112017-05-16 19:38:28 +02005795 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005796 fib_work->event = event;
5797
Ido Schimmel66a57632017-08-03 13:28:26 +02005798 switch (info->family) {
5799 case AF_INET:
5800 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5801 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005802 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005803 case AF_INET6:
5804 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5805 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005806 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005807 case RTNL_FAMILY_IPMR:
5808 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5809 mlxsw_sp_router_fibmr_event(fib_work, info);
5810 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005811 }
5812
Ido Schimmela0e47612017-02-06 16:20:10 +01005813 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005814
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005815 return NOTIFY_DONE;
5816}
5817
Ido Schimmel4724ba562017-03-10 08:53:39 +01005818static struct mlxsw_sp_rif *
5819mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5820 const struct net_device *dev)
5821{
5822 int i;
5823
5824 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005825 if (mlxsw_sp->router->rifs[i] &&
5826 mlxsw_sp->router->rifs[i]->dev == dev)
5827 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005828
5829 return NULL;
5830}
5831
5832static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5833{
5834 char ritr_pl[MLXSW_REG_RITR_LEN];
5835 int err;
5836
5837 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5838 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5839 if (WARN_ON_ONCE(err))
5840 return err;
5841
5842 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5843 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5844}
5845
5846static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005847 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005848{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005849 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5850 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5851 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005852}
5853
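/* Decide whether an address event should change the RIF configuration.
 * On NETDEV_UP a RIF should be created only if none exists yet. On
 * NETDEV_DOWN the RIF should be destroyed only once both the IPv4 and IPv6
 * address lists of the netdev are empty and the netdev is not an L3 slave.
 */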
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005854static bool
5855mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5856 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005857{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005858 struct inet6_dev *inet6_dev;
5859 bool addr_list_empty = true;
5860 struct in_device *idev;
5861
Ido Schimmel4724ba562017-03-10 08:53:39 +01005862 switch (event) {
5863 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005864 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005865 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005866 idev = __in_dev_get_rtnl(dev);
5867 if (idev && idev->ifa_list)
5868 addr_list_empty = false;
5869
5870 inet6_dev = __in6_dev_get(dev);
5871 if (addr_list_empty && inet6_dev &&
5872 !list_empty(&inet6_dev->addr_list))
5873 addr_list_empty = false;
5874
5875 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005876 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005877 return true;
5878 /* It is possible we already removed the RIF ourselves
5879 * if it was assigned to a netdev that is now a bridge
5880 * or LAG slave.
5881 */
5882 return false;
5883 }
5884
5885 return false;
5886}
5887
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005888static enum mlxsw_sp_rif_type
5889mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5890 const struct net_device *dev)
5891{
5892 enum mlxsw_sp_fid_type type;
5893
Petr Machata6ddb7422017-09-02 23:49:19 +02005894 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5895 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5896
5897	/* Otherwise, the RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005898 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5899 type = MLXSW_SP_FID_TYPE_8021Q;
5900 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5901 type = MLXSW_SP_FID_TYPE_8021Q;
5902 else if (netif_is_bridge_master(dev))
5903 type = MLXSW_SP_FID_TYPE_8021D;
5904 else
5905 type = MLXSW_SP_FID_TYPE_RFID;
5906
5907 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5908}
5909
Ido Schimmelde5ed992017-06-04 16:53:40 +02005910static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005911{
5912 int i;
5913
Ido Schimmelde5ed992017-06-04 16:53:40 +02005914 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5915 if (!mlxsw_sp->router->rifs[i]) {
5916 *p_rif_index = i;
5917 return 0;
5918 }
5919 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005920
Ido Schimmelde5ed992017-06-04 16:53:40 +02005921 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005922}
5923
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005924static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5925 u16 vr_id,
5926 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005927{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005928 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005929
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005930 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005931 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005932 return NULL;
5933
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005934 INIT_LIST_HEAD(&rif->nexthop_list);
5935 INIT_LIST_HEAD(&rif->neigh_list);
5936 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5937 rif->mtu = l3_dev->mtu;
5938 rif->vr_id = vr_id;
5939 rif->dev = l3_dev;
5940 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005941
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005942 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005943}
5944
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005945struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5946 u16 rif_index)
5947{
5948 return mlxsw_sp->router->rifs[rif_index];
5949}
5950
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005951u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5952{
5953 return rif->rif_index;
5954}
5955
Petr Machata92107cf2017-09-02 23:49:28 +02005956u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5957{
5958 return lb_rif->common.rif_index;
5959}
5960
5961u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5962{
5963 return lb_rif->ul_vr_id;
5964}
5965
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005966int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5967{
5968 return rif->dev->ifindex;
5969}
5970
Yotam Gigi91e4d592017-09-19 10:00:19 +02005971const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5972{
5973 return rif->dev;
5974}
5975
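/* Create a router interface (RIF) for an L3 netdev. The RIF type (sub-port,
 * VLAN, FID or IPIP loopback) is derived from the netdev, and the matching
 * ops are used to size, set up and configure it. The RIF is bound to the
 * virtual router of the netdev's FIB table and, except for loopback RIFs,
 * to a FID.
 */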
Ido Schimmel4724ba562017-03-10 08:53:39 +01005976static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005977mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005978 const struct mlxsw_sp_rif_params *params,
5979 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005980{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005981 u32 tb_id = l3mdev_fib_table(params->dev);
5982 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005983 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005984 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005985 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005986 struct mlxsw_sp_vr *vr;
5987 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005988 int err;
5989
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005990 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5991 ops = mlxsw_sp->router->rif_ops_arr[type];
5992
David Ahernf8fa9b42017-10-18 09:56:56 -07005993 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005994 if (IS_ERR(vr))
5995 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005996 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005997
Ido Schimmelde5ed992017-06-04 16:53:40 +02005998 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005999 if (err) {
6000 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02006001 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07006002 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01006003
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006004 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02006005 if (!rif) {
6006 err = -ENOMEM;
6007 goto err_rif_alloc;
6008 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006009 rif->mlxsw_sp = mlxsw_sp;
6010 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02006011
Petr Machata010cadf2017-09-02 23:49:18 +02006012 if (ops->fid_get) {
6013 fid = ops->fid_get(rif);
6014 if (IS_ERR(fid)) {
6015 err = PTR_ERR(fid);
6016 goto err_fid_get;
6017 }
6018 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02006019 }
6020
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006021 if (ops->setup)
6022 ops->setup(rif, params);
6023
6024 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006025 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006026 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006027
Yotam Gigid42b0962017-09-27 08:23:20 +02006028 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
6029 if (err)
6030 goto err_mr_rif_add;
6031
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006032 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02006033 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006034
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006035 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006036
Yotam Gigid42b0962017-09-27 08:23:20 +02006037err_mr_rif_add:
6038 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006039err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02006040 if (fid)
6041 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02006042err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006043 kfree(rif);
6044err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02006045err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02006046 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006047 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006048 return ERR_PTR(err);
6049}
6050
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006051void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006052{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006053 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6054 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02006055 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006056 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006057
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006058 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006059 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02006060
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006061 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006062 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02006063 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006064 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02006065 if (fid)
6066 /* Loopback RIFs are not associated with a FID. */
6067 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006068 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02006069 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02006070 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006071}
6072
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006073static void
6074mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6075 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6076{
6077 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6078
6079 params->vid = mlxsw_sp_port_vlan->vid;
6080 params->lag = mlxsw_sp_port->lagged;
6081 if (params->lag)
6082 params->lag_id = mlxsw_sp_port->lag_id;
6083 else
6084 params->system_port = mlxsw_sp_port->local_port;
6085}
6086
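/* Join a {port, VID} to the router: create the sub-port RIF for the L3 device
 * if it does not exist yet, map the {port, VID} to the RIF's FID, disable
 * learning on the VID and put it in the forwarding STP state.
 */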
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006087static int
Ido Schimmela1107482017-05-26 08:37:39 +02006088mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006089 struct net_device *l3_dev,
6090 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006091{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006092 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006093 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006094 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006095 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006096 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006097 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006098
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02006099 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006100 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006101 struct mlxsw_sp_rif_params params = {
6102 .dev = l3_dev,
6103 };
6104
6105 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07006106 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006107 if (IS_ERR(rif))
6108 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006109 }
6110
Ido Schimmela1107482017-05-26 08:37:39 +02006111 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006112 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02006113 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
6114 if (err)
6115 goto err_fid_port_vid_map;
6116
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006117 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006118 if (err)
6119 goto err_port_vid_learning_set;
6120
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006121 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006122 BR_STATE_FORWARDING);
6123 if (err)
6124 goto err_port_vid_stp_set;
6125
Ido Schimmela1107482017-05-26 08:37:39 +02006126 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006127
Ido Schimmel4724ba562017-03-10 08:53:39 +01006128 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006129
6130err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006131 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006132err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02006133 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6134err_fid_port_vid_map:
6135 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02006136 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006137}
6138
Ido Schimmela1107482017-05-26 08:37:39 +02006139void
6140mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006141{
Ido Schimmelce95e152017-05-26 08:37:27 +02006142 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006143 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006144 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02006145
Ido Schimmela1107482017-05-26 08:37:39 +02006146 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
6147 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02006148
Ido Schimmela1107482017-05-26 08:37:39 +02006149 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006150 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
6151 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02006152 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
6153	/* If the router port holds the last reference on the rFID, then the
6154	 * associated sub-port RIF will be destroyed.
6155	 */
6156 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006157}
6158
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006159static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
6160 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006161 unsigned long event, u16 vid,
6162 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006163{
6164 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02006165 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006166
Ido Schimmelce95e152017-05-26 08:37:27 +02006167 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006168 if (WARN_ON(!mlxsw_sp_port_vlan))
6169 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006170
6171 switch (event) {
6172 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02006173 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07006174 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006175 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006176 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006177 break;
6178 }
6179
6180 return 0;
6181}
6182
6183static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006184 unsigned long event,
6185 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006186{
Jiri Pirko2b94e582017-04-18 16:55:37 +02006187 if (netif_is_bridge_port(port_dev) ||
6188 netif_is_lag_port(port_dev) ||
6189 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006190 return 0;
6191
David Ahernf8fa9b42017-10-18 09:56:56 -07006192 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
6193 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006194}
6195
6196static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
6197 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006198 unsigned long event, u16 vid,
6199 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006200{
6201 struct net_device *port_dev;
6202 struct list_head *iter;
6203 int err;
6204
6205 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
6206 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006207 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6208 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006209 event, vid,
6210 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006211 if (err)
6212 return err;
6213 }
6214 }
6215
6216 return 0;
6217}
6218
6219static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006220 unsigned long event,
6221 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006222{
6223 if (netif_is_bridge_port(lag_dev))
6224 return 0;
6225
David Ahernf8fa9b42017-10-18 09:56:56 -07006226 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6227 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006228}
6229
Ido Schimmel4724ba562017-03-10 08:53:39 +01006230static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006231 unsigned long event,
6232 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006233{
6234 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006235 struct mlxsw_sp_rif_params params = {
6236 .dev = l3_dev,
6237 };
Ido Schimmela1107482017-05-26 08:37:39 +02006238 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006239
6240 switch (event) {
6241 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006242 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006243 if (IS_ERR(rif))
6244 return PTR_ERR(rif);
6245 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006246 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006247 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006248 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006249 break;
6250 }
6251
6252 return 0;
6253}
6254
6255static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006256 unsigned long event,
6257 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006258{
6259 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006260 u16 vid = vlan_dev_vlan_id(vlan_dev);
6261
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006262 if (netif_is_bridge_port(vlan_dev))
6263 return 0;
6264
Ido Schimmel4724ba562017-03-10 08:53:39 +01006265 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006266 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006267 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006268 else if (netif_is_lag_master(real_dev))
6269 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006270 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006271 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006272 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006273
6274 return 0;
6275}
6276
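/* Dispatch an address event according to the type of the netdev: physical
 * port, LAG, bridge or VLAN device. Netdevs enslaved to a bridge or LAG are
 * handled via their master and are ignored here.
 */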
Ido Schimmelb1e45522017-04-30 19:47:14 +03006277static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006278 unsigned long event,
6279 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006280{
6281 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006282 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006283 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006284 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006285 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006286 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006287 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006288 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006289 else
6290 return 0;
6291}
6292
Ido Schimmel4724ba562017-03-10 08:53:39 +01006293int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6294 unsigned long event, void *ptr)
6295{
6296 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6297 struct net_device *dev = ifa->ifa_dev->dev;
6298 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006299 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006300 int err = 0;
6301
David Ahern89d5dd22017-10-18 09:56:55 -07006302 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6303 if (event == NETDEV_UP)
6304 goto out;
6305
6306 mlxsw_sp = mlxsw_sp_lower_get(dev);
6307 if (!mlxsw_sp)
6308 goto out;
6309
6310 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6311 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6312 goto out;
6313
David Ahernf8fa9b42017-10-18 09:56:56 -07006314 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006315out:
6316 return notifier_from_errno(err);
6317}
6318
6319int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6320 unsigned long event, void *ptr)
6321{
6322 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6323 struct net_device *dev = ivi->ivi_dev->dev;
6324 struct mlxsw_sp *mlxsw_sp;
6325 struct mlxsw_sp_rif *rif;
6326 int err = 0;
6327
Ido Schimmel4724ba562017-03-10 08:53:39 +01006328 mlxsw_sp = mlxsw_sp_lower_get(dev);
6329 if (!mlxsw_sp)
6330 goto out;
6331
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006332 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006333 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006334 goto out;
6335
David Ahernf8fa9b42017-10-18 09:56:56 -07006336 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006337out:
6338 return notifier_from_errno(err);
6339}
6340
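/* IPv6 address notifications arrive in atomic context, so the event is
 * deferred to a work item which takes rtnl_lock() before looking up the RIF.
 */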
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006341struct mlxsw_sp_inet6addr_event_work {
6342 struct work_struct work;
6343 struct net_device *dev;
6344 unsigned long event;
6345};
6346
6347static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6348{
6349 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6350 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6351 struct net_device *dev = inet6addr_work->dev;
6352 unsigned long event = inet6addr_work->event;
6353 struct mlxsw_sp *mlxsw_sp;
6354 struct mlxsw_sp_rif *rif;
6355
6356 rtnl_lock();
6357 mlxsw_sp = mlxsw_sp_lower_get(dev);
6358 if (!mlxsw_sp)
6359 goto out;
6360
6361 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6362 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6363 goto out;
6364
David Ahernf8fa9b42017-10-18 09:56:56 -07006365 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006366out:
6367 rtnl_unlock();
6368 dev_put(dev);
6369 kfree(inet6addr_work);
6370}
6371
6372/* Called with rcu_read_lock() */
6373int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6374 unsigned long event, void *ptr)
6375{
6376 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6377 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6378 struct net_device *dev = if6->idev->dev;
6379
David Ahern89d5dd22017-10-18 09:56:55 -07006380 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6381 if (event == NETDEV_UP)
6382 return NOTIFY_DONE;
6383
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006384 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6385 return NOTIFY_DONE;
6386
6387 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6388 if (!inet6addr_work)
6389 return NOTIFY_BAD;
6390
6391 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6392 inet6addr_work->dev = dev;
6393 inet6addr_work->event = event;
6394 dev_hold(dev);
6395 mlxsw_core_schedule_work(&inet6addr_work->work);
6396
6397 return NOTIFY_DONE;
6398}
6399
David Ahern89d5dd22017-10-18 09:56:55 -07006400int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6401 unsigned long event, void *ptr)
6402{
6403 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6404 struct net_device *dev = i6vi->i6vi_dev->dev;
6405 struct mlxsw_sp *mlxsw_sp;
6406 struct mlxsw_sp_rif *rif;
6407 int err = 0;
6408
6409 mlxsw_sp = mlxsw_sp_lower_get(dev);
6410 if (!mlxsw_sp)
6411 goto out;
6412
6413 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6414 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6415 goto out;
6416
David Ahernf8fa9b42017-10-18 09:56:56 -07006417 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006418out:
6419 return notifier_from_errno(err);
6420}
6421
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006422static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006423 const char *mac, int mtu)
6424{
6425 char ritr_pl[MLXSW_REG_RITR_LEN];
6426 int err;
6427
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006428 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006429 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6430 if (err)
6431 return err;
6432
6433 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6434 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6435 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6436 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6437}
6438
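/* A netdev backing a RIF changed its MAC address or MTU: remove the FDB entry
 * for the old address, update the RIF in hardware and install an FDB entry
 * for the new address, rolling back on failure.
 */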
6439int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6440{
6441 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006442 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006443 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006444 int err;
6445
6446 mlxsw_sp = mlxsw_sp_lower_get(dev);
6447 if (!mlxsw_sp)
6448 return 0;
6449
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006450 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6451 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006452 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006453 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006454
Ido Schimmela1107482017-05-26 08:37:39 +02006455 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006456 if (err)
6457 return err;
6458
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006459 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6460 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006461 if (err)
6462 goto err_rif_edit;
6463
Ido Schimmela1107482017-05-26 08:37:39 +02006464 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006465 if (err)
6466 goto err_rif_fdb_op;
6467
Yotam Gigifd890fe2017-09-27 08:23:21 +02006468 if (rif->mtu != dev->mtu) {
6469 struct mlxsw_sp_vr *vr;
6470
6471		/* The RIF is relevant only to its mr_table instance, since unlike
6472		 * in unicast routing, a RIF in multicast routing cannot be shared
6473		 * between several multicast routing tables.
6474		 */
6475 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6476 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6477 }
6478
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006479 ether_addr_copy(rif->addr, dev->dev_addr);
6480 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006481
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006482 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006483
6484 return 0;
6485
6486err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006487 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006488err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006489 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006490 return err;
6491}
6492
Ido Schimmelb1e45522017-04-30 19:47:14 +03006493static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006494 struct net_device *l3_dev,
6495 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006496{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006497 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006498
Ido Schimmelb1e45522017-04-30 19:47:14 +03006499	/* If the netdev is already associated with a RIF, then we need to
6500	 * destroy that RIF and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006501 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006502 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6503 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006504 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006505
David Ahernf8fa9b42017-10-18 09:56:56 -07006506 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006507}
6508
Ido Schimmelb1e45522017-04-30 19:47:14 +03006509static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6510 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006511{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006512 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006513
Ido Schimmelb1e45522017-04-30 19:47:14 +03006514 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6515 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006516 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006517 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006518}
6519
Ido Schimmelb1e45522017-04-30 19:47:14 +03006520int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6521 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006522{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006523 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6524 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006525
Ido Schimmelb1e45522017-04-30 19:47:14 +03006526 if (!mlxsw_sp)
6527 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006528
Ido Schimmelb1e45522017-04-30 19:47:14 +03006529 switch (event) {
6530 case NETDEV_PRECHANGEUPPER:
6531 return 0;
6532 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006533 if (info->linking) {
6534 struct netlink_ext_ack *extack;
6535
6536 extack = netdev_notifier_info_to_extack(&info->info);
6537 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6538 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006539 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006540 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006541 break;
6542 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006543
Ido Schimmelb1e45522017-04-30 19:47:14 +03006544 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006545}
6546
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006547static struct mlxsw_sp_rif_subport *
6548mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006549{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006550 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006551}
6552
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006553static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6554 const struct mlxsw_sp_rif_params *params)
6555{
6556 struct mlxsw_sp_rif_subport *rif_subport;
6557
6558 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6559 rif_subport->vid = params->vid;
6560 rif_subport->lag = params->lag;
6561 if (params->lag)
6562 rif_subport->lag_id = params->lag_id;
6563 else
6564 rif_subport->system_port = params->system_port;
6565}
6566
6567static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6568{
6569 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6570 struct mlxsw_sp_rif_subport *rif_subport;
6571 char ritr_pl[MLXSW_REG_RITR_LEN];
6572
6573 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6574 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006575 rif->rif_index, rif->vr_id, rif->dev->mtu);
6576 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006577 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6578 rif_subport->lag ? rif_subport->lag_id :
6579 rif_subport->system_port,
6580 rif_subport->vid);
6581
6582 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6583}
6584
6585static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6586{
Petr Machata010cadf2017-09-02 23:49:18 +02006587 int err;
6588
6589 err = mlxsw_sp_rif_subport_op(rif, true);
6590 if (err)
6591 return err;
6592
6593 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6594 mlxsw_sp_fid_index(rif->fid), true);
6595 if (err)
6596 goto err_rif_fdb_op;
6597
6598 mlxsw_sp_fid_rif_set(rif->fid, rif);
6599 return 0;
6600
6601err_rif_fdb_op:
6602 mlxsw_sp_rif_subport_op(rif, false);
6603 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006604}
6605
6606static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6607{
Petr Machata010cadf2017-09-02 23:49:18 +02006608 struct mlxsw_sp_fid *fid = rif->fid;
6609
6610 mlxsw_sp_fid_rif_set(fid, NULL);
6611 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6612 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006613 mlxsw_sp_rif_subport_op(rif, false);
6614}
6615
6616static struct mlxsw_sp_fid *
6617mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6618{
6619 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6620}
6621
6622static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6623 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6624 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6625 .setup = mlxsw_sp_rif_subport_setup,
6626 .configure = mlxsw_sp_rif_subport_configure,
6627 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6628 .fid_get = mlxsw_sp_rif_subport_fid_get,
6629};
6630
6631static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6632 enum mlxsw_reg_ritr_if_type type,
6633 u16 vid_fid, bool enable)
6634{
6635 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6636 char ritr_pl[MLXSW_REG_RITR_LEN];
6637
6638 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006639 rif->dev->mtu);
6640 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006641 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6642
6643 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6644}
6645
Yotam Gigib35750f2017-10-09 11:15:33 +02006646u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006647{
6648 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6649}
6650
6651static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6652{
6653 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6654 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6655 int err;
6656
6657 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6658 if (err)
6659 return err;
6660
Ido Schimmel0d284812017-07-18 10:10:12 +02006661 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6662 mlxsw_sp_router_port(mlxsw_sp), true);
6663 if (err)
6664 goto err_fid_mc_flood_set;
6665
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006666 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6667 mlxsw_sp_router_port(mlxsw_sp), true);
6668 if (err)
6669 goto err_fid_bc_flood_set;
6670
Petr Machata010cadf2017-09-02 23:49:18 +02006671 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6672 mlxsw_sp_fid_index(rif->fid), true);
6673 if (err)
6674 goto err_rif_fdb_op;
6675
6676 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006677 return 0;
6678
Petr Machata010cadf2017-09-02 23:49:18 +02006679err_rif_fdb_op:
6680 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6681 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006682err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006683 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6684 mlxsw_sp_router_port(mlxsw_sp), false);
6685err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006686 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6687 return err;
6688}
6689
6690static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6691{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006692 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006693 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6694 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006695
Petr Machata010cadf2017-09-02 23:49:18 +02006696 mlxsw_sp_fid_rif_set(fid, NULL);
6697 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6698 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006699 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6700 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006701 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6702 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006703 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6704}
6705
6706static struct mlxsw_sp_fid *
6707mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6708{
6709 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6710
6711 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6712}
6713
6714static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6715 .type = MLXSW_SP_RIF_TYPE_VLAN,
6716 .rif_size = sizeof(struct mlxsw_sp_rif),
6717 .configure = mlxsw_sp_rif_vlan_configure,
6718 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6719 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6720};
6721
6722static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6723{
6724 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6725 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6726 int err;
6727
6728 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6729 true);
6730 if (err)
6731 return err;
6732
Ido Schimmel0d284812017-07-18 10:10:12 +02006733 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6734 mlxsw_sp_router_port(mlxsw_sp), true);
6735 if (err)
6736 goto err_fid_mc_flood_set;
6737
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006738 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6739 mlxsw_sp_router_port(mlxsw_sp), true);
6740 if (err)
6741 goto err_fid_bc_flood_set;
6742
Petr Machata010cadf2017-09-02 23:49:18 +02006743 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6744 mlxsw_sp_fid_index(rif->fid), true);
6745 if (err)
6746 goto err_rif_fdb_op;
6747
6748 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006749 return 0;
6750
Petr Machata010cadf2017-09-02 23:49:18 +02006751err_rif_fdb_op:
6752 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6753 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006754err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006755 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6756 mlxsw_sp_router_port(mlxsw_sp), false);
6757err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006758 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6759 return err;
6760}
6761
6762static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6763{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006764 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006765 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6766 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006767
Petr Machata010cadf2017-09-02 23:49:18 +02006768 mlxsw_sp_fid_rif_set(fid, NULL);
6769 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6770 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006771 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6772 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006773 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6774 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006775 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6776}
6777
6778static struct mlxsw_sp_fid *
6779mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6780{
6781 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6782}
6783
6784static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6785 .type = MLXSW_SP_RIF_TYPE_FID,
6786 .rif_size = sizeof(struct mlxsw_sp_rif),
6787 .configure = mlxsw_sp_rif_fid_configure,
6788 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6789 .fid_get = mlxsw_sp_rif_fid_fid_get,
6790};
6791
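/* Loopback RIFs back IP-in-IP tunnels. They are configured with the underlay
 * protocol, source address and GRE key from the loopback configuration, and
 * are bound to the virtual router of the underlay FIB table instead of to a
 * FID.
 */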
Petr Machata6ddb7422017-09-02 23:49:19 +02006792static struct mlxsw_sp_rif_ipip_lb *
6793mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6794{
6795 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6796}
6797
static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
                           const struct mlxsw_sp_rif_params *params)
{
        struct mlxsw_sp_rif_params_ipip_lb *params_lb;
        struct mlxsw_sp_rif_ipip_lb *rif_lb;

        params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
                                 common);
        rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
        rif_lb->lb_config = params_lb->lb_config;
}

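/* Write (or invalidate) the loopback RIF via the RITR register. Only IPv4
 * underlays are supported here; an IPv6 underlay returns -EAFNOSUPPORT.
 */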
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
                        struct mlxsw_sp_vr *ul_vr, bool enable)
{
        struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
        struct mlxsw_sp_rif *rif = &lb_rif->common;
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        char ritr_pl[MLXSW_REG_RITR_LEN];
        u32 saddr4;

        switch (lb_cf.ul_protocol) {
        case MLXSW_SP_L3_PROTO_IPV4:
                saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
                mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
                                    rif->rif_index, rif->vr_id, rif->dev->mtu);
                mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
                            MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
                            ul_vr->id, saddr4, lb_cf.okey);
                break;

        case MLXSW_SP_L3_PROTO_IPV6:
                return -EAFNOSUPPORT;
        }

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

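/* Bind the loopback RIF to the virtual router of the tunnel's underlay table
 * and program it in hardware.
 */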
static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
        u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_vr *ul_vr;
        int err;

        ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
        if (IS_ERR(ul_vr))
                return PTR_ERR(ul_vr);

        err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
        if (err)
                goto err_loopback_op;

        lb_rif->ul_vr_id = ul_vr->id;
        ++ul_vr->rif_count;
        return 0;

err_loopback_op:
        mlxsw_sp_vr_put(ul_vr);
        return err;
}

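/* Invalidate the loopback RIF and drop its reference on the underlay virtual
 * router.
 */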
static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
        struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
        struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
        struct mlxsw_sp_vr *ul_vr;

        ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
        mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

        --ul_vr->rif_count;
        mlxsw_sp_vr_put(ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
        .type                   = MLXSW_SP_RIF_TYPE_IPIP_LB,
        .rif_size               = sizeof(struct mlxsw_sp_rif_ipip_lb),
        .setup                  = mlxsw_sp_rif_ipip_lb_setup,
        .configure              = mlxsw_sp_rif_ipip_lb_configure,
        .deconfigure            = mlxsw_sp_rif_ipip_lb_deconfigure,
};

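/* Dispatch table mapping each RIF type to its operations. */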
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
        [MLXSW_SP_RIF_TYPE_SUBPORT]     = &mlxsw_sp_rif_subport_ops,
        [MLXSW_SP_RIF_TYPE_VLAN]        = &mlxsw_sp_rif_vlan_ops,
        [MLXSW_SP_RIF_TYPE_FID]         = &mlxsw_sp_rif_fid_ops,
        [MLXSW_SP_RIF_TYPE_IPIP_LB]     = &mlxsw_sp_rif_ipip_lb_ops,
};

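/* Allocate the RIF table according to the MAX_RIFS resource and set up the
 * RIF operations array.
 */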
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
        u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

        mlxsw_sp->router->rifs = kcalloc(max_rifs,
                                         sizeof(struct mlxsw_sp_rif *),
                                         GFP_KERNEL);
        if (!mlxsw_sp->router->rifs)
                return -ENOMEM;

        mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

        return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
                WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

        kfree(mlxsw_sp->router->rifs);
}

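/* Set the global IP-in-IP tunneling configuration through the TIGCR
 * register.
 */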
static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
        char tigcr_pl[MLXSW_REG_TIGCR_LEN];

        mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

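/* Initialize IP-in-IP support: tunnel operations, the tunnel list and the
 * global tunneling configuration.
 */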
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
        mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
        INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
        return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
        WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
        struct mlxsw_sp_router *router;

        /* Flush pending FIB notifications and then flush the device's
         * table before requesting another dump. The FIB notification
         * block is unregistered, so no need to take RTNL.
         */
        mlxsw_core_flush_owq();
        router = container_of(nb, struct mlxsw_sp_router, fib_nb);
        mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
        mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
        mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

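/* Hash IPv4 multipath traffic on source and destination addresses and, when
 * the kernel's multipath hash policy is L4, also on the IP protocol and
 * TCP/UDP ports.
 */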
static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
        bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

        mlxsw_sp_mp_hash_header_set(recr2_pl,
                                    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
        mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
        mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
        if (only_l3)
                return;
        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

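/* Hash IPv6 multipath traffic on source and destination addresses, the flow
 * label and the next header.
 */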
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
        mlxsw_sp_mp_hash_header_set(recr2_pl,
                                    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
        mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
        mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
        mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
        mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}

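/* Seed the ECMP hash with a random value and program the IPv4 and IPv6 hash
 * fields through the RECR2 register.
 */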
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
        char recr2_pl[MLXSW_REG_RECR2_LEN];
        u32 seed;

        get_random_bytes(&seed, sizeof(seed));
        mlxsw_reg_recr2_pack(recr2_pl, seed);
        mlxsw_sp_mp4_hash_init(recr2_pl);
        mlxsw_sp_mp6_hash_init(recr2_pl);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
        return 0;
}
#endif

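/* Enable routing in the device and set the maximum number of router
 * interfaces through the RGCR register.
 */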
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
        char rgcr_pl[MLXSW_REG_RGCR_LEN];
        u64 max_rifs;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
                return -EIO;
        max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

        mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
        mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
        if (err)
                return err;
        return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
        char rgcr_pl[MLXSW_REG_RGCR_LEN];

        mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

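/* Initialize the router: allocate its state, bring up the RIF, IP-in-IP,
 * nexthop, LPM, multicast routing, virtual router and neighbour sub-blocks,
 * configure multipath hashing and register the netevent and FIB notifiers.
 */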
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
        struct mlxsw_sp_router *router;
        int err;

        router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
        if (!router)
                return -ENOMEM;
        mlxsw_sp->router = router;
        router->mlxsw_sp = mlxsw_sp;

        INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
        err = __mlxsw_sp_router_init(mlxsw_sp);
        if (err)
                goto err_router_init;

        err = mlxsw_sp_rifs_init(mlxsw_sp);
        if (err)
                goto err_rifs_init;

        err = mlxsw_sp_ipips_init(mlxsw_sp);
        if (err)
                goto err_ipips_init;

        err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
                              &mlxsw_sp_nexthop_ht_params);
        if (err)
                goto err_nexthop_ht_init;

        err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
                              &mlxsw_sp_nexthop_group_ht_params);
        if (err)
                goto err_nexthop_group_ht_init;

        INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
        err = mlxsw_sp_lpm_init(mlxsw_sp);
        if (err)
                goto err_lpm_init;

        err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
        if (err)
                goto err_mr_init;

        err = mlxsw_sp_vrs_init(mlxsw_sp);
        if (err)
                goto err_vrs_init;

        err = mlxsw_sp_neigh_init(mlxsw_sp);
        if (err)
                goto err_neigh_init;

        mlxsw_sp->router->netevent_nb.notifier_call =
                mlxsw_sp_router_netevent_event;
        err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
        if (err)
                goto err_register_netevent_notifier;

        err = mlxsw_sp_mp_hash_init(mlxsw_sp);
        if (err)
                goto err_mp_hash_init;

        mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
        err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
                                    mlxsw_sp_router_fib_dump_flush);
        if (err)
                goto err_register_fib_notifier;

        return 0;

err_register_fib_notifier:
err_mp_hash_init:
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
        mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
        mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
        mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
        mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
        rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
        rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
        mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
        mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
        __mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
        kfree(mlxsw_sp->router);
        return err;
}

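/* Tear down the router in the reverse order of mlxsw_sp_router_init(). */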
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
        unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
        mlxsw_sp_mr_fini(mlxsw_sp);
        mlxsw_sp_lpm_fini(mlxsw_sp);
        rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
        rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
        mlxsw_sp_ipips_fini(mlxsw_sp);
        mlxsw_sp_rifs_fini(mlxsw_sp);
        __mlxsw_sp_router_fini(mlxsw_sp);
        kfree(mlxsw_sp->router);
}