/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

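/* Bind or unbind a counter to a RIF: read the RIF's current RITR register
 * contents and write them back with the updated counter binding.
 */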
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

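/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the given direction. Only then is the counter marked valid, so
 * that it can later be queried and freed.
 */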
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

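/* One bin for every possible prefix length, 0 through 128. */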
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

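/* Describe the tree layout to the device: the used prefix lengths are linked
 * into a chain of bins, rooted at the longest used prefix length.
 */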
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

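/* Reuse an existing tree whose protocol and prefix usage match exactly;
 * otherwise create a new one.
 */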
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

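/* Set up an unused virtual router for the given table ID: create its IPv4 and
 * IPv6 FIBs and its IPv4 multicast routing table.
 */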
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

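/* Rebind every virtual router that uses the FIB's old LPM tree to the new
 * tree. If any rebind fails, roll the already rebound routers back to the old
 * tree.
 */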
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static __be32
mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.saddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);

	return tun->parms.iph.daddr;
}

union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
			   const struct net_device *ol_dev)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return (union mlxsw_sp_l3addr) {
			.addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev),
		};
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	};

	WARN_ON(1);
	return (union mlxsw_sp_l3addr) {
		.addr4 = 0,
	};
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

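/* Allocate a tunnel index for the decap entry and link the FIB entry and the
 * IPIP entry to each other.
 */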
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

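/* Turn the decap route back into an ordinary trap route and update it in the
 * device.
 */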
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

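/* Promote a trap route to an IPIP decap route. If the device update fails,
 * demote it back.
 */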
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	static struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* The configuration where several tunnels have the same local address
	 * in the same underlay table needs special treatment in the HW. That is
	 * currently not implemented in the driver.
	 */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node) {
		ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry))
			return ERR_PTR(-EEXIST);
	}

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp,
			     const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_ipip_type ipipt;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV4) ||
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev,
						     MLXSW_SP_L3_PROTO_IPV6)) {
		ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
							ol_dev);
		if (IS_ERR(ipip_entry))
			return PTR_ERR(ipip_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry && ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp,
					     struct net_device *ol_dev)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		return 0;

	/* When a tunneling device is moved to a different VRF, we need to
	 * update the backing loopback. Since RIFs can't be edited, we need to
	 * destroy and recreate it. That might create a window of opportunity
	 * where RALUE and RATR registers end up referencing a RIF that's
	 * already gone. RATRs are handled by the RIF destroy, and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt,
						 ol_dev);
	if (IS_ERR(lb_rif))
		return PTR_ERR(lb_rif);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	ipip_entry->ol_lb = lb_rif;

	if (ol_dev->flags & IFF_UP) {
		decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp,
								 ipip_entry);
		if (decap_fib_entry)
			mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
							  decap_fib_entry);
	}

	return 0;
}

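/* Dispatch netdevice events on tunnel (overlay) devices to the handlers
 * above.
 */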
int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *ol_dev,
				  unsigned long event,
				  struct netdev_notifier_changeupper_info *info)
{
	switch (event) {
	case NETDEV_REGISTER:
		return mlxsw_sp_netdevice_ipip_reg_event(mlxsw_sp, ol_dev);
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_unreg_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_up_event(mlxsw_sp, ol_dev);
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev);
		return 0;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev))
			return mlxsw_sp_netdevice_ipip_vrf_event(mlxsw_sp,
								 ol_dev);
		return 0;
	}
	return 0;
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

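/* Iterate over the neighbour entries attached to a RIF: passing NULL returns
 * the first entry, and NULL is returned past the last one.
 */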
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001475struct mlxsw_sp_neigh_entry *
1476mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1477 struct mlxsw_sp_neigh_entry *neigh_entry)
1478{
1479 if (!neigh_entry) {
1480 if (list_empty(&rif->neigh_list))
1481 return NULL;
1482 else
1483 return list_first_entry(&rif->neigh_list,
1484 typeof(*neigh_entry),
1485 rif_list_node);
1486 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001487 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001488 return NULL;
1489 return list_next_entry(neigh_entry, rif_list_node);
1490}
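/* Editorial sketch (not part of the original driver): a hypothetical caller
 * would use mlxsw_sp_rif_neigh_next() above as a cursor to walk every
 * neighbour entry hanging off a RIF, for example when dumping host entries:
 *
 *	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;
 *
 *	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)))
 *		... visit the entry ...
 *
 * Passing NULL starts the walk and NULL is returned again once the last
 * entry on rif->neigh_list has been visited.
 */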
1491
1492int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1493{
1494 return neigh_entry->key.n->tbl->family;
1495}
1496
1497unsigned char *
1498mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1499{
1500 return neigh_entry->ha;
1501}
1502
1503u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1504{
1505 struct neighbour *n;
1506
1507 n = neigh_entry->key.n;
1508 return ntohl(*((__be32 *) n->primary_key));
1509}
1510
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001511struct in6_addr *
1512mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1513{
1514 struct neighbour *n;
1515
1516 n = neigh_entry->key.n;
1517 return (struct in6_addr *) &n->primary_key;
1518}
1519
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001520int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1521 struct mlxsw_sp_neigh_entry *neigh_entry,
1522 u64 *p_counter)
1523{
1524 if (!neigh_entry->counter_valid)
1525 return -EINVAL;
1526
1527 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1528 p_counter, NULL);
1529}
1530
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001531static struct mlxsw_sp_neigh_entry *
1532mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1533 u16 rif)
1534{
1535 struct mlxsw_sp_neigh_entry *neigh_entry;
1536
1537 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1538 if (!neigh_entry)
1539 return NULL;
1540
1541 neigh_entry->key.n = n;
1542 neigh_entry->rif = rif;
1543 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1544
1545 return neigh_entry;
1546}
1547
1548static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1549{
1550 kfree(neigh_entry);
1551}
1552
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001553static int
1554mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1555 struct mlxsw_sp_neigh_entry *neigh_entry)
1556{
Ido Schimmel9011b672017-05-16 19:38:25 +02001557 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001558 &neigh_entry->ht_node,
1559 mlxsw_sp_neigh_ht_params);
1560}
1561
1562static void
1563mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1564 struct mlxsw_sp_neigh_entry *neigh_entry)
1565{
Ido Schimmel9011b672017-05-16 19:38:25 +02001566 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001567 &neigh_entry->ht_node,
1568 mlxsw_sp_neigh_ht_params);
1569}
1570
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001571static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001572mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1573 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001574{
1575 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001576 const char *table_name;
1577
1578 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1579 case AF_INET:
1580 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1581 break;
1582 case AF_INET6:
1583 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1584 break;
1585 default:
1586 WARN_ON(1);
1587 return false;
1588 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001589
1590 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001591 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001592}
1593
1594static void
1595mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1596 struct mlxsw_sp_neigh_entry *neigh_entry)
1597{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001598 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001599 return;
1600
1601 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1602 return;
1603
1604 neigh_entry->counter_valid = true;
1605}
1606
1607static void
1608mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1609 struct mlxsw_sp_neigh_entry *neigh_entry)
1610{
1611 if (!neigh_entry->counter_valid)
1612 return;
1613 mlxsw_sp_flow_counter_free(mlxsw_sp,
1614 neigh_entry->counter_index);
1615 neigh_entry->counter_valid = false;
1616}
1617
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001618static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001619mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001620{
1621 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001622 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001623 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001624
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001625 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1626 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001627 return ERR_PTR(-EINVAL);
1628
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001629 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001630 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001631 return ERR_PTR(-ENOMEM);
1632
1633 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1634 if (err)
1635 goto err_neigh_entry_insert;
1636
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001637 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001638 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001639
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001640 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001641
1642err_neigh_entry_insert:
1643 mlxsw_sp_neigh_entry_free(neigh_entry);
1644 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001645}
1646
1647static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001648mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1649 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001650{
Ido Schimmel9665b742017-02-08 11:16:42 +01001651 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001652 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001653 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1654 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001655}
1656
1657static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001658mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001659{
Jiri Pirko33b13412016-11-10 12:31:04 +01001660 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001661
Jiri Pirko33b13412016-11-10 12:31:04 +01001662 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001663 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001664 &key, mlxsw_sp_neigh_ht_params);
1665}
1666
Yotam Gigic723c7352016-07-05 11:27:43 +02001667static void
1668mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1669{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001670 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001671
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001672#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001673 interval = min_t(unsigned long,
1674 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1675 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001676#else
1677 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1678#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001679 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001680}
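/* Editorial note: assuming the kernel defaults of DELAY_PROBE_TIME = 5
 * seconds for both arp_tbl and nd_tbl (i.e. no sysctl overrides), the
 * interval computed above is jiffies_to_msecs(5 * HZ) = 5000 ms, so the
 * neighbour activity dump is rescheduled roughly every five seconds.
 */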
1681
1682static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1683 char *rauhtd_pl,
1684 int ent_index)
1685{
1686 struct net_device *dev;
1687 struct neighbour *n;
1688 __be32 dipn;
1689 u32 dip;
1690 u16 rif;
1691
1692 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1693
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001694 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001695 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1696 return;
1697 }
1698
1699 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001700 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001701 n = neigh_lookup(&arp_tbl, &dipn, dev);
1702 if (!n) {
1703 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1704 &dip);
1705 return;
1706 }
1707
1708 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1709 neigh_event_send(n, NULL);
1710 neigh_release(n);
1711}
1712
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001713#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001714static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1715 char *rauhtd_pl,
1716 int rec_index)
1717{
1718 struct net_device *dev;
1719 struct neighbour *n;
1720 struct in6_addr dip;
1721 u16 rif;
1722
1723 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1724 (char *) &dip);
1725
1726 if (!mlxsw_sp->router->rifs[rif]) {
1727 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1728 return;
1729 }
1730
1731 dev = mlxsw_sp->router->rifs[rif]->dev;
1732 n = neigh_lookup(&nd_tbl, &dip, dev);
1733 if (!n) {
1734 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1735 &dip);
1736 return;
1737 }
1738
1739 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1740 neigh_event_send(n, NULL);
1741 neigh_release(n);
1742}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001743#else
1744static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1745 char *rauhtd_pl,
1746 int rec_index)
1747{
1748}
1749#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001750
Yotam Gigic723c7352016-07-05 11:27:43 +02001751static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1752 char *rauhtd_pl,
1753 int rec_index)
1754{
1755 u8 num_entries;
1756 int i;
1757
1758 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1759 rec_index);
1760 /* Hardware starts counting at 0, so add 1. */
1761 num_entries++;
1762
1763 /* Each record consists of several neighbour entries. */
1764 for (i = 0; i < num_entries; i++) {
1765 int ent_index;
1766
1767 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1768 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1769 ent_index);
1770 }
1771
1772}
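/* Editorial example (assuming MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC is 4):
 * record 0 of the dump covers entry indexes 0-3, record 1 covers 4-7, and
 * so on; the loop above simply fans one record out into per-entry
 * processing.
 */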
1773
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001774static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1775 char *rauhtd_pl,
1776 int rec_index)
1777{
1778 /* One record contains one entry. */
1779 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1780 rec_index);
1781}
1782
Yotam Gigic723c7352016-07-05 11:27:43 +02001783static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1784 char *rauhtd_pl, int rec_index)
1785{
1786 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1787 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1788 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1789 rec_index);
1790 break;
1791 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001792 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1793 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001794 break;
1795 }
1796}
1797
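/* Editorial note on the helper below: a RAUHTD dump is treated as "full"
 * (and therefore worth re-issuing) only when the maximum number of records
 * was returned and the last record could not have held any more entries -
 * either because it is an IPv6 record (one entry per record) or because it
 * is an IPv4 record already carrying the per-record maximum. Otherwise the
 * dump fit in a single query and the do/while loop in
 * __mlxsw_sp_router_neighs_update_rauhtd() terminates.
 */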
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001798static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1799{
1800 u8 num_rec, last_rec_index, num_entries;
1801
1802 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1803 last_rec_index = num_rec - 1;
1804
1805 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1806 return false;
1807 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1808 MLXSW_REG_RAUHTD_TYPE_IPV6)
1809 return true;
1810
1811 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1812 last_rec_index);
1813 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1814 return true;
1815 return false;
1816}
1817
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001818static int
1819__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1820 char *rauhtd_pl,
1821 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001822{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001823 int i, num_rec;
1824 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001825
1826 /* Make sure the neighbour's netdev isn't removed in the
1827 * process.
1828 */
1829 rtnl_lock();
1830 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001831 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001832 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1833 rauhtd_pl);
1834 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001835 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001836 break;
1837 }
1838 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1839 for (i = 0; i < num_rec; i++)
1840 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1841 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001842 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001843 rtnl_unlock();
1844
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001845 return err;
1846}
1847
1848static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1849{
1850 enum mlxsw_reg_rauhtd_type type;
1851 char *rauhtd_pl;
1852 int err;
1853
1854 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1855 if (!rauhtd_pl)
1856 return -ENOMEM;
1857
1858 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1859 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1860 if (err)
1861 goto out;
1862
1863 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1864 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1865out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001866 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001867 return err;
1868}
1869
1870static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1871{
1872 struct mlxsw_sp_neigh_entry *neigh_entry;
1873
 1874	/* Take the RTNL mutex here to prevent the lists from changing */
1875 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001876 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001877 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001878		/* If this neigh has nexthops, make the kernel think this neigh
1879 * is active regardless of the traffic.
1880 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001881 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001882 rtnl_unlock();
1883}
1884
1885static void
1886mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1887{
Ido Schimmel9011b672017-05-16 19:38:25 +02001888 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001889
Ido Schimmel9011b672017-05-16 19:38:25 +02001890 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001891 msecs_to_jiffies(interval));
1892}
1893
1894static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1895{
Ido Schimmel9011b672017-05-16 19:38:25 +02001896 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001897 int err;
1898
Ido Schimmel9011b672017-05-16 19:38:25 +02001899 router = container_of(work, struct mlxsw_sp_router,
1900 neighs_update.dw.work);
1901 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001902 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001903 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001904
Ido Schimmel9011b672017-05-16 19:38:25 +02001905 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001906
Ido Schimmel9011b672017-05-16 19:38:25 +02001907 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001908}
1909
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001910static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1911{
1912 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001913 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001914
Ido Schimmel9011b672017-05-16 19:38:25 +02001915 router = container_of(work, struct mlxsw_sp_router,
1916 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001917	/* Iterate over the nexthop neighbours, find those that are unresolved
 1918	 * and send ARP for them. This solves the chicken-and-egg problem where
 1919	 * a nexthop is not offloaded until its neighbour is resolved, but the
 1920	 * neighbour is never resolved as long as traffic flows in HW via a
 1921	 * different nexthop.
 1922	 *
 1923	 * Take the RTNL mutex here to prevent the lists from changing.
1924 */
1925 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001926 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001927 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001928 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001929 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001930 rtnl_unlock();
1931
Ido Schimmel9011b672017-05-16 19:38:25 +02001932 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001933 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1934}
1935
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001936static void
1937mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1938 struct mlxsw_sp_neigh_entry *neigh_entry,
1939 bool removing);
1940
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001941static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001942{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001943 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
1944 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
1945}
1946
1947static void
1948mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
1949 struct mlxsw_sp_neigh_entry *neigh_entry,
1950 enum mlxsw_reg_rauht_op op)
1951{
Jiri Pirko33b13412016-11-10 12:31:04 +01001952 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001953 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001954 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001955
1956 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1957 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001958 if (neigh_entry->counter_valid)
1959 mlxsw_reg_rauht_pack_counter(rauht_pl,
1960 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001961 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1962}
1963
1964static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001965mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
1966 struct mlxsw_sp_neigh_entry *neigh_entry,
1967 enum mlxsw_reg_rauht_op op)
1968{
1969 struct neighbour *n = neigh_entry->key.n;
1970 char rauht_pl[MLXSW_REG_RAUHT_LEN];
1971 const char *dip = n->primary_key;
1972
1973 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
1974 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001975 if (neigh_entry->counter_valid)
1976 mlxsw_reg_rauht_pack_counter(rauht_pl,
1977 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001978 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
1979}
1980
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001981bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001982{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02001983 struct neighbour *n = neigh_entry->key.n;
1984
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02001985 /* Packets with a link-local destination address are trapped
1986 * after LPM lookup and never reach the neighbour table, so
1987 * there is no need to program such neighbours to the device.
1988 */
1989 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
1990 IPV6_ADDR_LINKLOCAL)
1991 return true;
1992 return false;
1993}
1994
1995static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001996mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
1997 struct mlxsw_sp_neigh_entry *neigh_entry,
1998 bool adding)
1999{
2000 if (!adding && !neigh_entry->connected)
2001 return;
2002 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002003 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002004 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2005 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002006 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002007 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002008 return;
2009 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2010 mlxsw_sp_rauht_op(adding));
2011 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002012 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002013 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002014}
2015
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002016void
2017mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2018 struct mlxsw_sp_neigh_entry *neigh_entry,
2019 bool adding)
2020{
2021 if (adding)
2022 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2023 else
2024 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2025 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2026}
2027
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002028struct mlxsw_sp_neigh_event_work {
2029 struct work_struct work;
2030 struct mlxsw_sp *mlxsw_sp;
2031 struct neighbour *n;
2032};
2033
2034static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2035{
2036 struct mlxsw_sp_neigh_event_work *neigh_work =
2037 container_of(work, struct mlxsw_sp_neigh_event_work, work);
2038 struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
2039 struct mlxsw_sp_neigh_entry *neigh_entry;
2040 struct neighbour *n = neigh_work->n;
2041 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002042 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002043 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002044
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002045 /* If these parameters are changed after we release the lock,
2046 * then we are guaranteed to receive another event letting us
2047 * know about it.
2048 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002049 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002050 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002051 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002052 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002053 read_unlock_bh(&n->lock);
2054
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002055 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002056 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002057 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2058 if (!entry_connected && !neigh_entry)
2059 goto out;
2060 if (!neigh_entry) {
2061 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2062 if (IS_ERR(neigh_entry))
2063 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002064 }
2065
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002066 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2067 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2068 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2069
2070 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2071 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2072
2073out:
2074 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002075 neigh_release(n);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002076 kfree(neigh_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002077}
2078
Jiri Pirkoe7322632016-09-01 10:37:43 +02002079int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
2080 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002081{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002082 struct mlxsw_sp_neigh_event_work *neigh_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002083 struct mlxsw_sp_port *mlxsw_sp_port;
2084 struct mlxsw_sp *mlxsw_sp;
2085 unsigned long interval;
2086 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002087 struct neighbour *n;
Yotam Gigic723c7352016-07-05 11:27:43 +02002088
2089 switch (event) {
2090 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2091 p = ptr;
2092
2093 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002094 if (!p->dev || (p->tbl->family != AF_INET &&
2095 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002096 return NOTIFY_DONE;
2097
2098 /* We are in atomic context and can't take RTNL mutex,
2099 * so use RCU variant to walk the device chain.
2100 */
2101 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2102 if (!mlxsw_sp_port)
2103 return NOTIFY_DONE;
2104
2105 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2106 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002107 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002108
2109 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2110 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002111 case NETEVENT_NEIGH_UPDATE:
2112 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002113
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002114 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002115 return NOTIFY_DONE;
2116
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002117 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002118 if (!mlxsw_sp_port)
2119 return NOTIFY_DONE;
2120
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002121 neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
2122 if (!neigh_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002123 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002124 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002125 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002126
2127 INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
2128 neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2129 neigh_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002130
2131 /* Take a reference to ensure the neighbour won't be
 2132		 * destroyed until we drop the reference in the delayed
 2133		 * work.
2134 */
2135 neigh_clone(n);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002136 mlxsw_core_schedule_work(&neigh_work->work);
2137 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002138 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002139 }
2140
2141 return NOTIFY_DONE;
2142}
2143
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002144static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2145{
Yotam Gigic723c7352016-07-05 11:27:43 +02002146 int err;
2147
Ido Schimmel9011b672017-05-16 19:38:25 +02002148 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002149 &mlxsw_sp_neigh_ht_params);
2150 if (err)
2151 return err;
2152
2153 /* Initialize the polling interval according to the default
2154 * table.
2155 */
2156 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2157
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002158	/* Create the delayed works for neigh activity update and nexthop probing */
Ido Schimmel9011b672017-05-16 19:38:25 +02002159 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002160 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002161 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002162 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002163 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2164 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002165 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002166}
2167
2168static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2169{
Ido Schimmel9011b672017-05-16 19:38:25 +02002170 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2171 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2172 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002173}
2174
Ido Schimmel9665b742017-02-08 11:16:42 +01002175static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002176 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002177{
2178 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2179
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002180 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002181 rif_list_node) {
2182 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002183 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002184 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002185}
2186
Petr Machata35225e42017-09-02 23:49:22 +02002187enum mlxsw_sp_nexthop_type {
2188 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002189 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002190};
2191
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002192struct mlxsw_sp_nexthop_key {
2193 struct fib_nh *fib_nh;
2194};
2195
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002196struct mlxsw_sp_nexthop {
2197 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002198 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002199 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002200 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2201 * this belongs to
2202 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002203 struct rhash_head ht_node;
2204 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002205 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002206 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002207 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002208 int norm_nh_weight;
2209 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002210 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002211 u8 should_offload:1, /* set indicates this neigh is connected and
 2212			      * should be put in the KVD linear area of this group.
 2213			      */
 2214	   offloaded:1, /* set in case the neigh is actually put into
 2215			 * the KVD linear area of this group.
 2216			 */
 2217	   update:1; /* set indicates that the MAC of this neigh should be
2218 * updated in HW
2219 */
Petr Machata35225e42017-09-02 23:49:22 +02002220 enum mlxsw_sp_nexthop_type type;
2221 union {
2222 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002223 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002224 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002225 unsigned int counter_index;
2226 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002227};
2228
2229struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002230 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002231 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002232 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002233 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002234 u8 adj_index_valid:1,
2235 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002236 u32 adj_index;
2237 u16 ecmp_size;
2238 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002239 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002240 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002241#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002242};
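/* Editorial note: the nh_rif alias above resolves to the RIF of the
 * group's first nexthop. It appears to be meaningful only for groups that
 * do not use a gateway, where a single nexthop's RIF is what gets
 * programmed for the route.
 */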
2243
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002244void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2245 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002246{
2247 struct devlink *devlink;
2248
2249 devlink = priv_to_devlink(mlxsw_sp->core);
2250 if (!devlink_dpipe_table_counter_enabled(devlink,
2251 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2252 return;
2253
2254 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2255 return;
2256
2257 nh->counter_valid = true;
2258}
2259
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002260void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2261 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002262{
2263 if (!nh->counter_valid)
2264 return;
2265 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2266 nh->counter_valid = false;
2267}
2268
2269int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2270 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2271{
2272 if (!nh->counter_valid)
2273 return -EINVAL;
2274
2275 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2276 p_counter, NULL);
2277}
2278
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002279struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2280 struct mlxsw_sp_nexthop *nh)
2281{
2282 if (!nh) {
2283 if (list_empty(&router->nexthop_list))
2284 return NULL;
2285 else
2286 return list_first_entry(&router->nexthop_list,
2287 typeof(*nh), router_list_node);
2288 }
2289 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2290 return NULL;
2291 return list_next_entry(nh, router_list_node);
2292}
2293
2294bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2295{
2296 return nh->offloaded;
2297}
2298
2299unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2300{
2301 if (!nh->offloaded)
2302 return NULL;
2303 return nh->neigh_entry->ha;
2304}
2305
2306int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002307 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002308{
2309 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2310 u32 adj_hash_index = 0;
2311 int i;
2312
2313 if (!nh->offloaded || !nh_grp->adj_index_valid)
2314 return -EINVAL;
2315
2316 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002317 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002318
2319 for (i = 0; i < nh_grp->count; i++) {
2320 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2321
2322 if (nh_iter == nh)
2323 break;
2324 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002325 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002326 }
2327
2328 *p_adj_hash_index = adj_hash_index;
2329 return 0;
2330}
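/* Editorial example (hypothetical numbers): for a group with adj_index
 * 1000 and ecmp_size 8 holding three offloaded nexthops that occupy 2, 5
 * and 1 adjacency entries respectively, calling the helper above for the
 * third nexthop yields *p_adj_index = 1000, *p_adj_size = 8 and
 * *p_adj_hash_index = 7, since 2 + 5 entries precede it in the group.
 */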
2331
2332struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2333{
2334 return nh->rif;
2335}
2336
2337bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2338{
2339 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2340 int i;
2341
2342 for (i = 0; i < nh_grp->count; i++) {
2343 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2344
2345 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2346 return true;
2347 }
2348 return false;
2349}
2350
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002351static struct fib_info *
2352mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2353{
2354 return nh_grp->priv;
2355}
2356
2357struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002358 enum mlxsw_sp_l3proto proto;
2359 union {
2360 struct fib_info *fi;
2361 struct mlxsw_sp_fib6_entry *fib6_entry;
2362 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002363};
2364
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002365static bool
2366mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2367 const struct in6_addr *gw, int ifindex)
2368{
2369 int i;
2370
2371 for (i = 0; i < nh_grp->count; i++) {
2372 const struct mlxsw_sp_nexthop *nh;
2373
2374 nh = &nh_grp->nexthops[i];
2375 if (nh->ifindex == ifindex &&
2376 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2377 return true;
2378 }
2379
2380 return false;
2381}
2382
2383static bool
2384mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2385 const struct mlxsw_sp_fib6_entry *fib6_entry)
2386{
2387 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2388
2389 if (nh_grp->count != fib6_entry->nrt6)
2390 return false;
2391
2392 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2393 struct in6_addr *gw;
2394 int ifindex;
2395
2396 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2397 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2398 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2399 return false;
2400 }
2401
2402 return true;
2403}
2404
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002405static int
2406mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2407{
2408 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2409 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2410
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002411 switch (cmp_arg->proto) {
2412 case MLXSW_SP_L3_PROTO_IPV4:
2413 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2414 case MLXSW_SP_L3_PROTO_IPV6:
2415 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2416 cmp_arg->fib6_entry);
2417 default:
2418 WARN_ON(1);
2419 return 1;
2420 }
2421}
2422
2423static int
2424mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2425{
2426 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002427}
2428
2429static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2430{
2431 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002432 const struct mlxsw_sp_nexthop *nh;
2433 struct fib_info *fi;
2434 unsigned int val;
2435 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002436
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002437 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2438 case AF_INET:
2439 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2440 return jhash(&fi, sizeof(fi), seed);
2441 case AF_INET6:
2442 val = nh_grp->count;
2443 for (i = 0; i < nh_grp->count; i++) {
2444 nh = &nh_grp->nexthops[i];
2445 val ^= nh->ifindex;
2446 }
2447 return jhash(&val, sizeof(val), seed);
2448 default:
2449 WARN_ON(1);
2450 return 0;
2451 }
2452}
2453
2454static u32
2455mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2456{
2457 unsigned int val = fib6_entry->nrt6;
2458 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2459 struct net_device *dev;
2460
2461 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2462 dev = mlxsw_sp_rt6->rt->dst.dev;
2463 val ^= dev->ifindex;
2464 }
2465
2466 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002467}
2468
2469static u32
2470mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2471{
2472 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2473
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002474 switch (cmp_arg->proto) {
2475 case MLXSW_SP_L3_PROTO_IPV4:
2476 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2477 case MLXSW_SP_L3_PROTO_IPV6:
2478 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2479 default:
2480 WARN_ON(1);
2481 return 0;
2482 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002483}
2484
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002485static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002486 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002487 .hashfn = mlxsw_sp_nexthop_group_hash,
2488 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2489 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002490};
2491
2492static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2493 struct mlxsw_sp_nexthop_group *nh_grp)
2494{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002495 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2496 !nh_grp->gateway)
2497 return 0;
2498
Ido Schimmel9011b672017-05-16 19:38:25 +02002499 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002500 &nh_grp->ht_node,
2501 mlxsw_sp_nexthop_group_ht_params);
2502}
2503
2504static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2505 struct mlxsw_sp_nexthop_group *nh_grp)
2506{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002507 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2508 !nh_grp->gateway)
2509 return;
2510
Ido Schimmel9011b672017-05-16 19:38:25 +02002511 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002512 &nh_grp->ht_node,
2513 mlxsw_sp_nexthop_group_ht_params);
2514}
2515
2516static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002517mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2518 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002519{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002520 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2521
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002522 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002523 cmp_arg.fi = fi;
2524 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2525 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002526 mlxsw_sp_nexthop_group_ht_params);
2527}
2528
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002529static struct mlxsw_sp_nexthop_group *
2530mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2531 struct mlxsw_sp_fib6_entry *fib6_entry)
2532{
2533 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2534
2535 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2536 cmp_arg.fib6_entry = fib6_entry;
2537 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2538 &cmp_arg,
2539 mlxsw_sp_nexthop_group_ht_params);
2540}
2541
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002542static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2543 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2544 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2545 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2546};
2547
2548static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2549 struct mlxsw_sp_nexthop *nh)
2550{
Ido Schimmel9011b672017-05-16 19:38:25 +02002551 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002552 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2553}
2554
2555static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2556 struct mlxsw_sp_nexthop *nh)
2557{
Ido Schimmel9011b672017-05-16 19:38:25 +02002558 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002559 mlxsw_sp_nexthop_ht_params);
2560}
2561
Ido Schimmelad178c82017-02-08 11:16:40 +01002562static struct mlxsw_sp_nexthop *
2563mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2564 struct mlxsw_sp_nexthop_key key)
2565{
Ido Schimmel9011b672017-05-16 19:38:25 +02002566 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002567 mlxsw_sp_nexthop_ht_params);
2568}
2569
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002570static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002571 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002572 u32 adj_index, u16 ecmp_size,
2573 u32 new_adj_index,
2574 u16 new_ecmp_size)
2575{
2576 char raleu_pl[MLXSW_REG_RALEU_LEN];
2577
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002578 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002579 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2580 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002581 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002582 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2583}
2584
2585static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2586 struct mlxsw_sp_nexthop_group *nh_grp,
2587 u32 old_adj_index, u16 old_ecmp_size)
2588{
2589 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002590 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002591 int err;
2592
2593 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002594 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002595 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002596 fib = fib_entry->fib_node->fib;
2597 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002598 old_adj_index,
2599 old_ecmp_size,
2600 nh_grp->adj_index,
2601 nh_grp->ecmp_size);
2602 if (err)
2603 return err;
2604 }
2605 return 0;
2606}
2607
Ido Schimmeleb789982017-10-22 23:11:48 +02002608static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2609 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002610{
2611 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2612 char ratr_pl[MLXSW_REG_RATR_LEN];
2613
2614 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002615 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2616 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002617 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002618 if (nh->counter_valid)
2619 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2620 else
2621 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2622
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002623 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2624}
2625
Ido Schimmeleb789982017-10-22 23:11:48 +02002626int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2627 struct mlxsw_sp_nexthop *nh)
2628{
2629 int i;
2630
2631 for (i = 0; i < nh->num_adj_entries; i++) {
2632 int err;
2633
2634 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2635 if (err)
2636 return err;
2637 }
2638
2639 return 0;
2640}
2641
2642static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2643 u32 adj_index,
2644 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002645{
2646 const struct mlxsw_sp_ipip_ops *ipip_ops;
2647
2648 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2649 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2650}
2651
Ido Schimmeleb789982017-10-22 23:11:48 +02002652static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2653 u32 adj_index,
2654 struct mlxsw_sp_nexthop *nh)
2655{
2656 int i;
2657
2658 for (i = 0; i < nh->num_adj_entries; i++) {
2659 int err;
2660
2661 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2662 nh);
2663 if (err)
2664 return err;
2665 }
2666
2667 return 0;
2668}
2669
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002670static int
Petr Machata35225e42017-09-02 23:49:22 +02002671mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2672 struct mlxsw_sp_nexthop_group *nh_grp,
2673 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002674{
2675 u32 adj_index = nh_grp->adj_index; /* base */
2676 struct mlxsw_sp_nexthop *nh;
2677 int i;
2678 int err;
2679
2680 for (i = 0; i < nh_grp->count; i++) {
2681 nh = &nh_grp->nexthops[i];
2682
2683 if (!nh->should_offload) {
2684 nh->offloaded = 0;
2685 continue;
2686 }
2687
Ido Schimmela59b7e02017-01-23 11:11:42 +01002688 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002689 switch (nh->type) {
2690 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002691 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002692 (mlxsw_sp, adj_index, nh);
2693 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002694 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2695 err = mlxsw_sp_nexthop_ipip_update
2696 (mlxsw_sp, adj_index, nh);
2697 break;
Petr Machata35225e42017-09-02 23:49:22 +02002698 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002699 if (err)
2700 return err;
2701 nh->update = 0;
2702 nh->offloaded = 1;
2703 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002704 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002705 }
2706 return 0;
2707}
2708
Ido Schimmel1819ae32017-07-21 18:04:28 +02002709static bool
2710mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2711 const struct mlxsw_sp_fib_entry *fib_entry);
2712
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002713static int
2714mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2715 struct mlxsw_sp_nexthop_group *nh_grp)
2716{
2717 struct mlxsw_sp_fib_entry *fib_entry;
2718 int err;
2719
2720 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002721 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2722 fib_entry))
2723 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002724 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2725 if (err)
2726 return err;
2727 }
2728 return 0;
2729}
2730
2731static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002732mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2733 enum mlxsw_reg_ralue_op op, int err);
2734
2735static void
2736mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2737{
2738 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2739 struct mlxsw_sp_fib_entry *fib_entry;
2740
2741 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2742 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2743 fib_entry))
2744 continue;
2745 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2746 }
2747}
2748
Ido Schimmel425a08c2017-10-22 23:11:47 +02002749static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2750{
2751 /* Valid sizes for an adjacency group are:
2752 * 1-64, 512, 1024, 2048 and 4096.
2753 */
2754 if (*p_adj_grp_size <= 64)
2755 return;
2756 else if (*p_adj_grp_size <= 512)
2757 *p_adj_grp_size = 512;
2758 else if (*p_adj_grp_size <= 1024)
2759 *p_adj_grp_size = 1024;
2760 else if (*p_adj_grp_size <= 2048)
2761 *p_adj_grp_size = 2048;
2762 else
2763 *p_adj_grp_size = 4096;
2764}
2765
2766static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2767 unsigned int alloc_size)
2768{
2769 if (alloc_size >= 4096)
2770 *p_adj_grp_size = 4096;
2771 else if (alloc_size >= 2048)
2772 *p_adj_grp_size = 2048;
2773 else if (alloc_size >= 1024)
2774 *p_adj_grp_size = 1024;
2775 else if (alloc_size >= 512)
2776 *p_adj_grp_size = 512;
2777}
2778
2779static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2780 u16 *p_adj_grp_size)
2781{
2782 unsigned int alloc_size;
2783 int err;
2784
2785 /* Round up the requested group size to the next size supported
2786 * by the device and make sure the request can be satisfied.
2787 */
2788 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2789 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2790 &alloc_size);
2791 if (err)
2792 return err;
2793 /* It is possible the allocation results in more allocated
 2794	 * entries than requested. Try to use as many of them as
2795 * possible.
2796 */
2797 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2798
2799 return 0;
2800}
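/* Editorial example (hypothetical numbers): a requested group size of 80
 * is first rounded up to 512, the next size supported by the device. If
 * the KVD linear size query then reports that 1024 entries would actually
 * be allocated, the size is raised to 1024 so as much of the block as
 * possible is used; had the query reported exactly 512, it would stay 512.
 */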
2801
Ido Schimmel77d964e2017-08-02 09:56:05 +02002802static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002803mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2804{
2805 int i, g = 0, sum_norm_weight = 0;
2806 struct mlxsw_sp_nexthop *nh;
2807
2808 for (i = 0; i < nh_grp->count; i++) {
2809 nh = &nh_grp->nexthops[i];
2810
2811 if (!nh->should_offload)
2812 continue;
2813 if (g > 0)
2814 g = gcd(nh->nh_weight, g);
2815 else
2816 g = nh->nh_weight;
2817 }
2818
2819 for (i = 0; i < nh_grp->count; i++) {
2820 nh = &nh_grp->nexthops[i];
2821
2822 if (!nh->should_offload)
2823 continue;
2824 nh->norm_nh_weight = nh->nh_weight / g;
2825 sum_norm_weight += nh->norm_nh_weight;
2826 }
2827
2828 nh_grp->sum_norm_weight = sum_norm_weight;
2829}
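/* Editorial example (hypothetical weights): for a group whose offloadable
 * nexthops have weights 3 and 6, the gcd is 3, the normalized weights
 * become 1 and 2, and sum_norm_weight is 3. Nexthops that cannot be
 * offloaded contribute neither to the gcd nor to the sum.
 */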
2830
2831static void
2832mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2833{
2834 int total = nh_grp->sum_norm_weight;
2835 u16 ecmp_size = nh_grp->ecmp_size;
2836 int i, weight = 0, lower_bound = 0;
2837
2838 for (i = 0; i < nh_grp->count; i++) {
2839 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2840 int upper_bound;
2841
2842 if (!nh->should_offload)
2843 continue;
2844 weight += nh->norm_nh_weight;
2845 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2846 nh->num_adj_entries = upper_bound - lower_bound;
2847 lower_bound = upper_bound;
2848 }
2849}
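/* Editorial example (continuing the hypothetical weights above): with
 * sum_norm_weight 3 and ecmp_size 3, the first nexthop (normalized weight
 * 1) gets DIV_ROUND_CLOSEST(3 * 1, 3) - 0 = 1 adjacency entry and the
 * second (normalized weight 2) gets 3 - 1 = 2 entries, preserving the
 * 1:2 ratio of the original weights.
 */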
2850
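/* Editorial overview of the refresh below: when the offload state of any
 * nexthop in the group has changed, the group is rebuilt - the weights are
 * normalized, the adjacency group size is derived and fixed up, a fresh
 * KVD linear block is allocated and populated, and the FIB entries are
 * then either mass-updated to point at the new block or reprogrammed with
 * the new adjacency index. Any failure along the way falls back to
 * trapping the traffic to the kernel via the set_trap label.
 */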
2851static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002852mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2853 struct mlxsw_sp_nexthop_group *nh_grp)
2854{
Ido Schimmeleb789982017-10-22 23:11:48 +02002855 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002856 struct mlxsw_sp_nexthop *nh;
2857 bool offload_change = false;
2858 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002859 bool old_adj_index_valid;
2860 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002861 int i;
2862 int err;
2863
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002864 if (!nh_grp->gateway) {
2865 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2866 return;
2867 }
2868
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002869 for (i = 0; i < nh_grp->count; i++) {
2870 nh = &nh_grp->nexthops[i];
2871
Petr Machata56b8a9e2017-07-31 09:27:29 +02002872 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002873 offload_change = true;
2874 if (nh->should_offload)
2875 nh->update = 1;
2876 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002877 }
2878 if (!offload_change) {
2879 /* Nothing was added or removed, so no need to reallocate. Just
 2880	 * update the MACs on the existing adjacency indexes.
2881 */
Petr Machata35225e42017-09-02 23:49:22 +02002882 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002883 if (err) {
2884 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2885 goto set_trap;
2886 }
2887 return;
2888 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002889 mlxsw_sp_nexthop_group_normalize(nh_grp);
2890 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002891 /* No neigh of this group is connected so we just set
 2892		 * the trap and let everything flow through the kernel.
2893 */
2894 goto set_trap;
2895
Ido Schimmeleb789982017-10-22 23:11:48 +02002896 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002897 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2898 if (err)
2899 /* No valid allocation size available. */
2900 goto set_trap;
2901
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002902 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2903 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002904 /* We ran out of KVD linear space, just set the
 2905		 * trap and let everything flow through the kernel.
2906 */
2907 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2908 goto set_trap;
2909 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002910 old_adj_index_valid = nh_grp->adj_index_valid;
2911 old_adj_index = nh_grp->adj_index;
2912 old_ecmp_size = nh_grp->ecmp_size;
2913 nh_grp->adj_index_valid = 1;
2914 nh_grp->adj_index = adj_index;
2915 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02002916 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02002917 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002918 if (err) {
2919 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2920 goto set_trap;
2921 }
2922
2923 if (!old_adj_index_valid) {
2924 /* The trap was set for fib entries, so we have to call
2925 * fib entry update to unset it and use adjacency index.
2926 */
2927 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2928 if (err) {
2929 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
2930 goto set_trap;
2931 }
2932 return;
2933 }
2934
2935 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
2936 old_adj_index, old_ecmp_size);
2937 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
2938 if (err) {
2939 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
2940 goto set_trap;
2941 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02002942
2943 /* Offload state within the group changed, so update the flags. */
2944 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
2945
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002946 return;
2947
2948set_trap:
2949 old_adj_index_valid = nh_grp->adj_index_valid;
2950 nh_grp->adj_index_valid = 0;
2951 for (i = 0; i < nh_grp->count; i++) {
2952 nh = &nh_grp->nexthops[i];
2953 nh->offloaded = 0;
2954 }
2955 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2956 if (err)
2957 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
2958 if (old_adj_index_valid)
2959 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
2960}
2961
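/* Update the offload intent of a nexthop: mark it for offload when its
 * neighbour (or tunnel) became valid, clear the intent of an offloaded
 * nexthop when it went away, and flag the nexthop so that the next
 * group refresh rewrites its adjacency entries.
 */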
2962static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
2963 bool removing)
2964{
Petr Machata213666a2017-07-31 09:27:30 +02002965 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002966 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02002967 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002968 nh->should_offload = 0;
2969 nh->update = 1;
2970}
2971
2972static void
2973mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2974 struct mlxsw_sp_neigh_entry *neigh_entry,
2975 bool removing)
2976{
2977 struct mlxsw_sp_nexthop *nh;
2978
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002979 list_for_each_entry(nh, &neigh_entry->nexthop_list,
2980 neigh_list_node) {
2981 __mlxsw_sp_nexthop_neigh_update(nh, removing);
2982 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
2983 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002984}
2985
Ido Schimmel9665b742017-02-08 11:16:42 +01002986static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002987 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002988{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002989 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002990 return;
2991
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002992 nh->rif = rif;
2993 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01002994}
2995
2996static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
2997{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002998 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002999 return;
3000
3001 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003002 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003003}
3004
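/* Resolve the neighbour used by a gateway nexthop: look it up in the
 * kernel neighbour table (creating it if needed), bind the nexthop to
 * the corresponding neigh entry and derive the initial offload state
 * from the neighbour's NUD state.
 */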
Ido Schimmela8c97012017-02-08 11:16:35 +01003005static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3006 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003007{
3008 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003009 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003010 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003011 int err;
3012
Ido Schimmelad178c82017-02-08 11:16:40 +01003013 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003014 return 0;
3015
Jiri Pirko33b13412016-11-10 12:31:04 +01003016	/* Take a reference on the neighbour here, ensuring that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003017	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003018 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003019 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003020 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003021 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003022 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003023 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3024 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003025 if (IS_ERR(n))
3026 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003027 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003028 }
3029 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3030 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003031 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3032 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003033 err = -EINVAL;
3034 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003035 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003036 }
Yotam Gigib2157142016-07-05 11:27:51 +02003037
3038 /* If that is the first nexthop connected to that neigh, add to
3039 * nexthop_neighs_list
3040 */
3041 if (list_empty(&neigh_entry->nexthop_list))
3042 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003043 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003044
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003045 nh->neigh_entry = neigh_entry;
3046 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3047 read_lock_bh(&n->lock);
3048 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003049 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003050 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003051 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003052
3053 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003054
3055err_neigh_entry_create:
3056 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003057 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003058}
3059
Ido Schimmela8c97012017-02-08 11:16:35 +01003060static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3061 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003062{
3063 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003064 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003065
Ido Schimmelb8399a12017-02-08 11:16:33 +01003066 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003067 return;
3068 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003069
Ido Schimmel58312122016-12-23 09:32:50 +01003070 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003071 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003072 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003073
3074 /* If that is the last nexthop connected to that neigh, remove from
3075 * nexthop_neighs_list
3076 */
Ido Schimmele58be792017-02-08 11:16:28 +01003077 if (list_empty(&neigh_entry->nexthop_list))
3078 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003079
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003080 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3081 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3082
3083 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003084}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003085
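/* Bind a gateway nexthop to the IP-in-IP entry of its overlay device
 * and mark the nexthop as offloadable.
 */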
Petr Machata1012b9a2017-09-02 23:49:23 +02003086static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003087 struct mlxsw_sp_nexthop *nh,
3088 struct net_device *ol_dev)
3089{
3090 if (!nh->nh_grp->gateway || nh->ipip_entry)
3091 return 0;
3092
Petr Machata4cccb732017-10-16 16:26:39 +02003093 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3094 if (!nh->ipip_entry)
3095 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003096
3097 __mlxsw_sp_nexthop_neigh_update(nh, false);
3098 return 0;
3099}
3100
3101static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3102 struct mlxsw_sp_nexthop *nh)
3103{
3104 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3105
3106 if (!ipip_entry)
3107 return;
3108
3109 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003110 nh->ipip_entry = NULL;
3111}
3112
3113static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3114 const struct fib_nh *fib_nh,
3115 enum mlxsw_sp_ipip_type *p_ipipt)
3116{
3117 struct net_device *dev = fib_nh->nh_dev;
3118
3119 return dev &&
3120 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3121 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3122}
3123
Petr Machata35225e42017-09-02 23:49:22 +02003124static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3125 struct mlxsw_sp_nexthop *nh)
3126{
3127 switch (nh->type) {
3128 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3129 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3130 mlxsw_sp_nexthop_rif_fini(nh);
3131 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003132 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003133 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003134 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3135 break;
Petr Machata35225e42017-09-02 23:49:22 +02003136 }
3137}
3138
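/* Initialize an IPv4 nexthop either as an IP-in-IP nexthop, when its
 * device is a tunnel that can be offloaded, or as a regular Ethernet
 * nexthop bound to a RIF and a neighbour entry.
 */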
3139static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3140 struct mlxsw_sp_nexthop *nh,
3141 struct fib_nh *fib_nh)
3142{
Petr Machata1012b9a2017-09-02 23:49:23 +02003143 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003144 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003145 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003146 struct mlxsw_sp_rif *rif;
3147 int err;
3148
Petr Machata1012b9a2017-09-02 23:49:23 +02003149 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3150 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3151 MLXSW_SP_L3_PROTO_IPV4)) {
3152 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003153 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003154 if (err)
3155 return err;
3156 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3157 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003158 }
3159
Petr Machata35225e42017-09-02 23:49:22 +02003160 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3161 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3162 if (!rif)
3163 return 0;
3164
3165 mlxsw_sp_nexthop_rif_init(nh, rif);
3166 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3167 if (err)
3168 goto err_neigh_init;
3169
3170 return 0;
3171
3172err_neigh_init:
3173 mlxsw_sp_nexthop_rif_fini(nh);
3174 return err;
3175}
3176
3177static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3178 struct mlxsw_sp_nexthop *nh)
3179{
3180 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3181}
3182
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003183static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3184 struct mlxsw_sp_nexthop_group *nh_grp,
3185 struct mlxsw_sp_nexthop *nh,
3186 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003187{
3188 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003189 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003190 int err;
3191
3192 nh->nh_grp = nh_grp;
3193 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003194#ifdef CONFIG_IP_ROUTE_MULTIPATH
3195 nh->nh_weight = fib_nh->nh_weight;
3196#else
3197 nh->nh_weight = 1;
3198#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003199 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003200 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3201 if (err)
3202 return err;
3203
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003204 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003205 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3206
Ido Schimmel97989ee2017-03-10 08:53:38 +01003207 if (!dev)
3208 return 0;
3209
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003210 in_dev = __in_dev_get_rtnl(dev);
3211 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3212 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3213 return 0;
3214
Petr Machata35225e42017-09-02 23:49:22 +02003215 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003216 if (err)
3217 goto err_nexthop_neigh_init;
3218
3219 return 0;
3220
3221err_nexthop_neigh_init:
3222 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3223 return err;
3224}
3225
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003226static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3227 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003228{
Petr Machata35225e42017-09-02 23:49:22 +02003229 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003230 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003231 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003232 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003233}
3234
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003235static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3236 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003237{
3238 struct mlxsw_sp_nexthop_key key;
3239 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003240
Ido Schimmel9011b672017-05-16 19:38:25 +02003241 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003242 return;
3243
3244 key.fib_nh = fib_nh;
3245 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3246 if (WARN_ON_ONCE(!nh))
3247 return;
3248
Ido Schimmelad178c82017-02-08 11:16:40 +01003249 switch (event) {
3250 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003251 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003252 break;
3253 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003254 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003255 break;
3256 }
3257
3258 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3259}
3260
Ido Schimmel9665b742017-02-08 11:16:42 +01003261static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003262 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003263{
3264 struct mlxsw_sp_nexthop *nh, *tmp;
3265
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003266 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003267 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003268 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3269 }
3270}
3271
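/* A route is considered a gateway route if its nexthop uses a gateway
 * (nexthop scope is link) or egresses through an offloadable IP-in-IP
 * tunnel device.
 */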
Petr Machata9b014512017-09-02 23:49:20 +02003272static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3273 const struct fib_info *fi)
3274{
Petr Machata1012b9a2017-09-02 23:49:23 +02003275 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3276 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003277}
3278
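/* Create a nexthop group for an IPv4 fib_info: allocate one nexthop
 * per kernel nexthop, initialize them, insert the group into the
 * nexthop group hash table and program its adjacency entries.
 */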
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003279static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003280mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003281{
3282 struct mlxsw_sp_nexthop_group *nh_grp;
3283 struct mlxsw_sp_nexthop *nh;
3284 struct fib_nh *fib_nh;
3285 size_t alloc_size;
3286 int i;
3287 int err;
3288
3289 alloc_size = sizeof(*nh_grp) +
3290 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3291 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3292 if (!nh_grp)
3293 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003294 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003295 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003296 nh_grp->neigh_tbl = &arp_tbl;
3297
Petr Machata9b014512017-09-02 23:49:20 +02003298 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003299 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003300 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003301 for (i = 0; i < nh_grp->count; i++) {
3302 nh = &nh_grp->nexthops[i];
3303 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003304 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003305 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003306 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003307 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003308 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3309 if (err)
3310 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003311 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3312 return nh_grp;
3313
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003314err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003315err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003316 for (i--; i >= 0; i--) {
3317 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003318 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003319 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003320 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003321 kfree(nh_grp);
3322 return ERR_PTR(err);
3323}
3324
3325static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003326mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3327 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003328{
3329 struct mlxsw_sp_nexthop *nh;
3330 int i;
3331
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003332 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003333 for (i = 0; i < nh_grp->count; i++) {
3334 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003335 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003336 }
Ido Schimmel58312122016-12-23 09:32:50 +01003337 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3338 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003339 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003340 kfree(nh_grp);
3341}
3342
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003343static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3344 struct mlxsw_sp_fib_entry *fib_entry,
3345 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003346{
3347 struct mlxsw_sp_nexthop_group *nh_grp;
3348
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003349 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003350 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003351 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003352 if (IS_ERR(nh_grp))
3353 return PTR_ERR(nh_grp);
3354 }
3355 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3356 fib_entry->nh_group = nh_grp;
3357 return 0;
3358}
3359
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003360static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3361 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003362{
3363 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3364
3365 list_del(&fib_entry->nexthop_group_node);
3366 if (!list_empty(&nh_grp->fib_list))
3367 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003368 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003369}
3370
Ido Schimmel013b20f2017-02-08 11:16:36 +01003371static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003372mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3373{
3374 struct mlxsw_sp_fib4_entry *fib4_entry;
3375
3376 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3377 common);
3378 return !fib4_entry->tos;
3379}
3380
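/* An entry can only be offloaded once its nexthop group is resolved:
 * remote entries require a valid adjacency index, local entries
 * require a RIF and IP-in-IP decap entries can always be offloaded.
 * IPv4 entries with a non-zero TOS are never offloaded.
 */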
3381static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003382mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3383{
3384 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3385
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003386 switch (fib_entry->fib_node->fib->proto) {
3387 case MLXSW_SP_L3_PROTO_IPV4:
3388 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3389 return false;
3390 break;
3391 case MLXSW_SP_L3_PROTO_IPV6:
3392 break;
3393 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003394
Ido Schimmel013b20f2017-02-08 11:16:36 +01003395 switch (fib_entry->type) {
3396 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3397 return !!nh_group->adj_index_valid;
3398 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003399 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003400 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3401 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003402 default:
3403 return false;
3404 }
3405}
3406
Ido Schimmel428b8512017-08-03 13:28:28 +02003407static struct mlxsw_sp_nexthop *
3408mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3409 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3410{
3411 int i;
3412
3413 for (i = 0; i < nh_grp->count; i++) {
3414 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3415 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3416
3417 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3418 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3419 &rt->rt6i_gateway))
3420 return nh;
3421 continue;
3422 }
3423
3424 return NULL;
3425}
3426
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003427static void
3428mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3429{
3430 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3431 int i;
3432
Petr Machata4607f6d2017-09-02 23:49:25 +02003433 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3434 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003435 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3436 return;
3437 }
3438
3439 for (i = 0; i < nh_grp->count; i++) {
3440 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3441
3442 if (nh->offloaded)
3443 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3444 else
3445 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3446 }
3447}
3448
3449static void
3450mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3451{
3452 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3453 int i;
3454
3455 for (i = 0; i < nh_grp->count; i++) {
3456 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3457
3458 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3459 }
3460}
3461
Ido Schimmel428b8512017-08-03 13:28:28 +02003462static void
3463mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3464{
3465 struct mlxsw_sp_fib6_entry *fib6_entry;
3466 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3467
3468 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3469 common);
3470
3471 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3472 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003473 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003474 return;
3475 }
3476
3477 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3478 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3479 struct mlxsw_sp_nexthop *nh;
3480
3481 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3482 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003483 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003484 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003485 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003486 }
3487}
3488
3489static void
3490mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3491{
3492 struct mlxsw_sp_fib6_entry *fib6_entry;
3493 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3494
3495 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3496 common);
3497 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3498 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3499
Ido Schimmelfe400792017-08-15 09:09:49 +02003500 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003501 }
3502}
3503
Ido Schimmel013b20f2017-02-08 11:16:36 +01003504static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3505{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003506 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003507 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003508 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003509 break;
3510 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003511 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3512 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003513 }
3514}
3515
3516static void
3517mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3518{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003519 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003520 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003521 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003522 break;
3523 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003524 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3525 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003526 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003527}
3528
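/* Reflect the result of a RALUE operation in the route's offload
 * indication: clear it on delete and, on a successful write, set or
 * clear it according to whether the entry could actually be offloaded.
 */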
3529static void
3530mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3531 enum mlxsw_reg_ralue_op op, int err)
3532{
3533 switch (op) {
3534 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003535 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3536 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3537 if (err)
3538 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003539 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003540 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003541 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003542 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3543 return;
3544 default:
3545 return;
3546 }
3547}
3548
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003549static void
3550mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3551 const struct mlxsw_sp_fib_entry *fib_entry,
3552 enum mlxsw_reg_ralue_op op)
3553{
3554 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3555 enum mlxsw_reg_ralxx_protocol proto;
3556 u32 *p_dip;
3557
3558 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3559
3560 switch (fib->proto) {
3561 case MLXSW_SP_L3_PROTO_IPV4:
3562 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3563 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3564 fib_entry->fib_node->key.prefix_len,
3565 *p_dip);
3566 break;
3567 case MLXSW_SP_L3_PROTO_IPV6:
3568 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3569 fib_entry->fib_node->key.prefix_len,
3570 fib_entry->fib_node->key.addr);
3571 break;
3572 }
3573}
3574
3575static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3576 struct mlxsw_sp_fib_entry *fib_entry,
3577 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003578{
3579 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003580 enum mlxsw_reg_ralue_trap_action trap_action;
3581 u16 trap_id = 0;
3582 u32 adjacency_index = 0;
3583 u16 ecmp_size = 0;
3584
3585 /* In case the nexthop group adjacency index is valid, use it
 3586	 * with the provided ECMP size. Otherwise, set up a trap and pass
 3587	 * traffic to the kernel.
3588 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003589 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003590 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3591 adjacency_index = fib_entry->nh_group->adj_index;
3592 ecmp_size = fib_entry->nh_group->ecmp_size;
3593 } else {
3594 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3595 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3596 }
3597
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003598 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003599 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3600 adjacency_index, ecmp_size);
3601 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3602}
3603
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003604static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3605 struct mlxsw_sp_fib_entry *fib_entry,
3606 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003607{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003608 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003609 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003610 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003611 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003612 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003613
3614 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3615 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003616 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003617 } else {
3618 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3619 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3620 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003621
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003622 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003623 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3624 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003625 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3626}
3627
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003628static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3629 struct mlxsw_sp_fib_entry *fib_entry,
3630 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003631{
3632 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003633
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003634 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003635 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3636 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3637}
3638
Petr Machata4607f6d2017-09-02 23:49:25 +02003639static int
3640mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3641 struct mlxsw_sp_fib_entry *fib_entry,
3642 enum mlxsw_reg_ralue_op op)
3643{
3644 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3645 const struct mlxsw_sp_ipip_ops *ipip_ops;
3646
3647 if (WARN_ON(!ipip_entry))
3648 return -EINVAL;
3649
3650 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3651 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3652 fib_entry->decap.tunnel_index);
3653}
3654
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003655static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3656 struct mlxsw_sp_fib_entry *fib_entry,
3657 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003658{
3659 switch (fib_entry->type) {
3660 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003661 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003662 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003663 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003664 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003665 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003666 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3667 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3668 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003669 }
3670 return -EINVAL;
3671}
3672
3673static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3674 struct mlxsw_sp_fib_entry *fib_entry,
3675 enum mlxsw_reg_ralue_op op)
3676{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003677 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003678
Ido Schimmel013b20f2017-02-08 11:16:36 +01003679 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003680
Ido Schimmel013b20f2017-02-08 11:16:36 +01003681 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003682}
3683
3684static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3685 struct mlxsw_sp_fib_entry *fib_entry)
3686{
Jiri Pirko7146da32016-09-01 10:37:41 +02003687 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3688 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003689}
3690
3691static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3692 struct mlxsw_sp_fib_entry *fib_entry)
3693{
3694 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3695 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3696}
3697
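/* Classify an IPv4 route: local routes terminating an IP-in-IP tunnel
 * become decap entries, other local and broadcast routes are trapped
 * to the CPU, unreachable / blackhole / prohibit routes are trapped
 * with a lower priority using a local action, and unicast routes are
 * programmed as remote or local depending on whether they have a
 * gateway.
 */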
Jiri Pirko61c503f2016-07-04 08:23:11 +02003698static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003699mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3700 const struct fib_entry_notifier_info *fen_info,
3701 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003702{
Petr Machata4607f6d2017-09-02 23:49:25 +02003703 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3704 struct net_device *dev = fen_info->fi->fib_dev;
3705 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003706 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003707
Ido Schimmel97989ee2017-03-10 08:53:38 +01003708 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003709 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003710 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3711 MLXSW_SP_L3_PROTO_IPV4, dip);
3712 if (ipip_entry) {
3713 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3714 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3715 fib_entry,
3716 ipip_entry);
3717 }
3718 /* fall through */
3719 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003720 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3721 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003722 case RTN_UNREACHABLE: /* fall through */
3723 case RTN_BLACKHOLE: /* fall through */
3724 case RTN_PROHIBIT:
3725 /* Packets hitting these routes need to be trapped, but
3726 * can do so with a lower priority than packets directed
3727 * at the host, so use action type local instead of trap.
3728 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003729 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003730 return 0;
3731 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003732 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003733 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003734 else
3735 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003736 return 0;
3737 default:
3738 return -EINVAL;
3739 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003740}
3741
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003742static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003743mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3744 struct mlxsw_sp_fib_node *fib_node,
3745 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003746{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003747 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003748 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003749 int err;
3750
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003751 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3752 if (!fib4_entry)
3753 return ERR_PTR(-ENOMEM);
3754 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003755
3756 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3757 if (err)
3758 goto err_fib4_entry_type_set;
3759
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003760 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003761 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003762 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003763
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003764 fib4_entry->prio = fen_info->fi->fib_priority;
3765 fib4_entry->tb_id = fen_info->tb_id;
3766 fib4_entry->type = fen_info->type;
3767 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003768
3769 fib_entry->fib_node = fib_node;
3770
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003771 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003772
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003773err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003774err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003775 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003776 return ERR_PTR(err);
3777}
3778
3779static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003780 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003781{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003782 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003783 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003784}
3785
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003786static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003787mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3788 const struct fib_entry_notifier_info *fen_info)
3789{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003790 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003791 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003792 struct mlxsw_sp_fib *fib;
3793 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003794
Ido Schimmel160e22a2017-07-18 10:10:20 +02003795 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3796 if (!vr)
3797 return NULL;
3798 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3799
3800 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3801 sizeof(fen_info->dst),
3802 fen_info->dst_len);
3803 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003804 return NULL;
3805
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003806 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3807 if (fib4_entry->tb_id == fen_info->tb_id &&
3808 fib4_entry->tos == fen_info->tos &&
3809 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003810 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3811 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003812 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003813 }
3814 }
3815
3816 return NULL;
3817}
3818
3819static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3820 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3821 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3822 .key_len = sizeof(struct mlxsw_sp_fib_key),
3823 .automatic_shrinking = true,
3824};
3825
3826static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3827 struct mlxsw_sp_fib_node *fib_node)
3828{
3829 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3830 mlxsw_sp_fib_ht_params);
3831}
3832
3833static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3834 struct mlxsw_sp_fib_node *fib_node)
3835{
3836 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3837 mlxsw_sp_fib_ht_params);
3838}
3839
3840static struct mlxsw_sp_fib_node *
3841mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3842 size_t addr_len, unsigned char prefix_len)
3843{
3844 struct mlxsw_sp_fib_key key;
3845
3846 memset(&key, 0, sizeof(key));
3847 memcpy(key.addr, addr, addr_len);
3848 key.prefix_len = prefix_len;
3849 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3850}
3851
3852static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003853mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003854 size_t addr_len, unsigned char prefix_len)
3855{
3856 struct mlxsw_sp_fib_node *fib_node;
3857
3858 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3859 if (!fib_node)
3860 return NULL;
3861
3862 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003863 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003864 memcpy(fib_node->key.addr, addr, addr_len);
3865 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003866
3867 return fib_node;
3868}
3869
3870static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3871{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003872 list_del(&fib_node->list);
3873 WARN_ON(!list_empty(&fib_node->entry_list));
3874 kfree(fib_node);
3875}
3876
3877static bool
3878mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3879 const struct mlxsw_sp_fib_entry *fib_entry)
3880{
3881 return list_first_entry(&fib_node->entry_list,
3882 struct mlxsw_sp_fib_entry, list) == fib_entry;
3883}
3884
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003885static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3886 struct mlxsw_sp_fib *fib,
3887 struct mlxsw_sp_fib_node *fib_node)
3888{
3889 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3890 struct mlxsw_sp_lpm_tree *lpm_tree;
3891 int err;
3892
3893 /* Since the tree is shared between all virtual routers we must
3894 * make sure it contains all the required prefix lengths. This
3895 * can be computed by either adding the new prefix length to the
3896 * existing prefix usage of a bound tree, or by aggregating the
3897 * prefix lengths across all virtual routers and adding the new
3898 * one as well.
3899 */
3900 if (fib->lpm_tree)
3901 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3902 &fib->lpm_tree->prefix_usage);
3903 else
3904 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3905 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
3906
3907 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3908 fib->proto);
3909 if (IS_ERR(lpm_tree))
3910 return PTR_ERR(lpm_tree);
3911
3912 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
3913 return 0;
3914
3915 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3916 if (err)
3917 return err;
3918
3919 return 0;
3920}
3921
3922static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3923 struct mlxsw_sp_fib *fib)
3924{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003925 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3926 return;
3927 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
3928 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
3929 fib->lpm_tree = NULL;
3930}
3931
Ido Schimmel9aecce12017-02-09 10:28:42 +01003932static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
3933{
3934 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003935 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003936
3937 if (fib->prefix_ref_count[prefix_len]++ == 0)
3938 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
3939}
3940
3941static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
3942{
3943 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003944 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003945
3946 if (--fib->prefix_ref_count[prefix_len] == 0)
3947 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
3948}
3949
Ido Schimmel76610eb2017-03-10 08:53:41 +01003950static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
3951 struct mlxsw_sp_fib_node *fib_node,
3952 struct mlxsw_sp_fib *fib)
3953{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003954 int err;
3955
3956 err = mlxsw_sp_fib_node_insert(fib, fib_node);
3957 if (err)
3958 return err;
3959 fib_node->fib = fib;
3960
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003961 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
3962 if (err)
3963 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003964
3965 mlxsw_sp_fib_node_prefix_inc(fib_node);
3966
3967 return 0;
3968
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003969err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01003970 fib_node->fib = NULL;
3971 mlxsw_sp_fib_node_remove(fib, fib_node);
3972 return err;
3973}
3974
3975static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
3976 struct mlxsw_sp_fib_node *fib_node)
3977{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003978 struct mlxsw_sp_fib *fib = fib_node->fib;
3979
3980 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003981 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003982 fib_node->fib = NULL;
3983 mlxsw_sp_fib_node_remove(fib, fib_node);
3984}
3985
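/* Get the fib node for a given prefix: get the virtual router and its
 * fib, reuse an existing node if the prefix is already known,
 * otherwise create a new node and link it to an LPM tree covering its
 * prefix length.
 */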
Ido Schimmel9aecce12017-02-09 10:28:42 +01003986static struct mlxsw_sp_fib_node *
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003987mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
3988 size_t addr_len, unsigned char prefix_len,
3989 enum mlxsw_sp_l3proto proto)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003990{
3991 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01003992 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02003993 struct mlxsw_sp_vr *vr;
3994 int err;
3995
David Ahernf8fa9b42017-10-18 09:56:56 -07003996 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
Jiri Pirko5b004412016-09-01 10:37:40 +02003997 if (IS_ERR(vr))
3998 return ERR_CAST(vr);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02003999 fib = mlxsw_sp_vr_fib(vr, proto);
Jiri Pirko5b004412016-09-01 10:37:40 +02004000
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004001 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004002 if (fib_node)
4003 return fib_node;
4004
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004005 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004006 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02004007 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004008 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02004009 }
Jiri Pirko5b004412016-09-01 10:37:40 +02004010
Ido Schimmel76610eb2017-03-10 08:53:41 +01004011 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
4012 if (err)
4013 goto err_fib_node_init;
4014
Ido Schimmel9aecce12017-02-09 10:28:42 +01004015 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004016
Ido Schimmel76610eb2017-03-10 08:53:41 +01004017err_fib_node_init:
4018 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004019err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004020 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004021 return ERR_PTR(err);
4022}
4023
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004024static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
4025 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02004026{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004027 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02004028
Ido Schimmel9aecce12017-02-09 10:28:42 +01004029 if (!list_empty(&fib_node->entry_list))
4030 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004031 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004032 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004033 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02004034}
4035
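/* Entries of a fib node are kept sorted by table ID, TOS and route
 * priority, so that the entry that should be offloaded is always the
 * first in the list. Find the first existing entry in front of which
 * the new entry should be inserted.
 */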
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004036static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01004037mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004038 const struct mlxsw_sp_fib4_entry *new4_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004039{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004040 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004041
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004042 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
4043 if (fib4_entry->tb_id > new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004044 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004045 if (fib4_entry->tb_id != new4_entry->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004046 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004047 if (fib4_entry->tos > new4_entry->tos)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004048 continue;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004049 if (fib4_entry->prio >= new4_entry->prio ||
4050 fib4_entry->tos < new4_entry->tos)
4051 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004052 }
4053
4054 return NULL;
4055}
4056
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004057static int
4058mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
4059 struct mlxsw_sp_fib4_entry *new4_entry)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004060{
4061 struct mlxsw_sp_fib_node *fib_node;
4062
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004063 if (WARN_ON(!fib4_entry))
Ido Schimmel4283bce2017-02-09 10:28:43 +01004064 return -EINVAL;
4065
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004066 fib_node = fib4_entry->common.fib_node;
4067 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
4068 common.list) {
4069 if (fib4_entry->tb_id != new4_entry->tb_id ||
4070 fib4_entry->tos != new4_entry->tos ||
4071 fib4_entry->prio != new4_entry->prio)
Ido Schimmel4283bce2017-02-09 10:28:43 +01004072 break;
4073 }
4074
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004075 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
Ido Schimmel4283bce2017-02-09 10:28:43 +01004076 return 0;
4077}
4078
Ido Schimmel9aecce12017-02-09 10:28:42 +01004079static int
Ido Schimmel9efbee62017-07-18 10:10:28 +02004080mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004081 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004082{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004083 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004084 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004085
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004086 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004087
Ido Schimmel4283bce2017-02-09 10:28:43 +01004088 if (append)
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004089 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
4090 if (replace && WARN_ON(!fib4_entry))
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004091 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01004092
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004093	/* Insert the new entry before the replaced one, so that we can later
 4094	 * remove the latter.
4095 */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004096 if (fib4_entry) {
4097 list_add_tail(&new4_entry->common.list,
4098 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004099 } else {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004100 struct mlxsw_sp_fib4_entry *last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004101
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004102 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4103 if (new4_entry->tb_id > last->tb_id)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004104 break;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004105 fib4_entry = last;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004106 }
4107
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004108 if (fib4_entry)
4109 list_add(&new4_entry->common.list,
4110 &fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004111 else
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004112 list_add(&new4_entry->common.list,
4113 &fib_node->entry_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004114 }
4115
4116 return 0;
4117}
4118
4119static void
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004120mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004121{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004122 list_del(&fib4_entry->common.list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004123}
4124
Ido Schimmel80c238f2017-07-18 10:10:29 +02004125static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
4126 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004127{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004128 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4129
Ido Schimmel9aecce12017-02-09 10:28:42 +01004130 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4131 return 0;
4132
4133 /* To prevent packet loss, overwrite the previously offloaded
4134 * entry.
4135 */
4136 if (!list_is_singular(&fib_node->entry_list)) {
4137 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4138 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4139
4140 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
4141 }
4142
4143 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
4144}
4145
Ido Schimmel80c238f2017-07-18 10:10:29 +02004146static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
4147 struct mlxsw_sp_fib_entry *fib_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004148{
Ido Schimmel9efbee62017-07-18 10:10:28 +02004149 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
4150
Ido Schimmel9aecce12017-02-09 10:28:42 +01004151 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
4152 return;
4153
4154 /* Promote the next entry by overwriting the deleted entry */
4155 if (!list_is_singular(&fib_node->entry_list)) {
4156 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
4157 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4158
4159 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
4160 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
4161 return;
4162 }
4163
4164 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
4165}
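/* A hedged sketch of the make-before-break scheme implemented by the two
 * helpers above, for a node holding entries A (currently offloaded) and B.
 * When A is deleted, B first overwrites A's LPM entry in the device and
 * only then is A's offload indication cleared:
 *
 *	mlxsw_sp_fib_entry_update(mlxsw_sp, B);
 *	mlxsw_sp_fib_entry_offload_refresh(A, MLXSW_REG_RALUE_OP_WRITE_DELETE, 0);
 *
 * Only when the deleted entry is the last one on the node is the route
 * actually removed from the device with mlxsw_sp_fib_entry_del().
 */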
4166
4167static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004168 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004169 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004170{
Ido Schimmel9aecce12017-02-09 10:28:42 +01004171 int err;
4172
Ido Schimmel9efbee62017-07-18 10:10:28 +02004173 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004174 if (err)
4175 return err;
4176
Ido Schimmel80c238f2017-07-18 10:10:29 +02004177 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004178 if (err)
Ido Schimmel80c238f2017-07-18 10:10:29 +02004179 goto err_fib_node_entry_add;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004180
Ido Schimmel9aecce12017-02-09 10:28:42 +01004181 return 0;
4182
Ido Schimmel80c238f2017-07-18 10:10:29 +02004183err_fib_node_entry_add:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004184 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004185 return err;
4186}
4187
4188static void
4189mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004190 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004191{
Ido Schimmel80c238f2017-07-18 10:10:29 +02004192 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004193 mlxsw_sp_fib4_node_list_remove(fib4_entry);
Petr Machata4607f6d2017-09-02 23:49:25 +02004194
4195 if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
4196 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004197}
4198
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004199static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004200 struct mlxsw_sp_fib4_entry *fib4_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004201 bool replace)
4202{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004203 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
4204 struct mlxsw_sp_fib4_entry *replaced;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004205
4206 if (!replace)
4207 return;
4208
4209	/* We inserted the new entry before the replaced one */
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004210 replaced = list_next_entry(fib4_entry, common.list);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004211
4212 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
4213 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004214 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004215}
4216
Ido Schimmel9aecce12017-02-09 10:28:42 +01004217static int
4218mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01004219 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004220 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01004221{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004222 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004223 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004224 int err;
4225
Ido Schimmel9011b672017-05-16 19:38:25 +02004226 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004227 return 0;
4228
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004229 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
4230 &fen_info->dst, sizeof(fen_info->dst),
4231 fen_info->dst_len,
4232 MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004233 if (IS_ERR(fib_node)) {
4234 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
4235 return PTR_ERR(fib_node);
4236 }
4237
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004238 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
4239 if (IS_ERR(fib4_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004240 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004241 err = PTR_ERR(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004242 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004243 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02004244
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004245 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004246 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004247 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01004248 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
4249 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004250 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01004251
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004252 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01004253
Jiri Pirko61c503f2016-07-04 08:23:11 +02004254 return 0;
4255
Ido Schimmel9aecce12017-02-09 10:28:42 +01004256err_fib4_node_entry_link:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004257 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01004258err_fib4_entry_create:
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004259 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004260 return err;
4261}
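/* A condensed, non-authoritative view of the replace path above, assuming
 * a notification with replace=true and append=false:
 *
 *	fib_node = mlxsw_sp_fib_node_get(...);		/* prefix node (refcounted) */
 *	fib4_entry = mlxsw_sp_fib4_entry_create(...);	/* new entry */
 *	mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, true, false);
 *	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, true);
 *
 * The new entry is programmed into the device before the replaced one is
 * unlinked and destroyed, so forwarding is not interrupted by the replace.
 */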
4262
Jiri Pirko37956d72016-10-20 16:05:43 +02004263static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
4264 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02004265{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004266 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004267 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02004268
Ido Schimmel9011b672017-05-16 19:38:25 +02004269 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02004270 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004271
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004272 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
4273 if (WARN_ON(!fib4_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02004274 return;
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004275 fib_node = fib4_entry->common.fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02004276
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02004277 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
4278 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02004279 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02004280}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004281
Ido Schimmel428b8512017-08-03 13:28:28 +02004282static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
4283{
4284 /* Packets with link-local destination IP arriving to the router
4285 * are trapped to the CPU, so no need to program specific routes
4286 * for them.
4287 */
4288 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
4289 return true;
4290
4291 /* Multicast routes aren't supported, so ignore them. Neighbour
4292 * Discovery packets are specifically trapped.
4293 */
4294 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
4295 return true;
4296
4297 /* Cloned routes are irrelevant in the forwarding path. */
4298 if (rt->rt6i_flags & RTF_CACHE)
4299 return true;
4300
4301 return false;
4302}
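/* Illustrative examples (not from the code above) of routes this check
 * filters out: fe80::/64 link-local prefixes, ff00::/8 multicast routes
 * and per-destination RTF_CACHE clones created by PMTU exceptions.
 */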
4303
4304static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
4305{
4306 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4307
4308 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
4309 if (!mlxsw_sp_rt6)
4310 return ERR_PTR(-ENOMEM);
4311
4312	/* In case of route replace, the replaced route is deleted without
4313	 * notification. Take a reference to prevent accessing freed
4314	 * memory.
4315	 */
4316 mlxsw_sp_rt6->rt = rt;
4317 rt6_hold(rt);
4318
4319 return mlxsw_sp_rt6;
4320}
4321
4322#if IS_ENABLED(CONFIG_IPV6)
4323static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4324{
4325 rt6_release(rt);
4326}
4327#else
4328static void mlxsw_sp_rt6_release(struct rt6_info *rt)
4329{
4330}
4331#endif
4332
4333static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4334{
4335 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
4336 kfree(mlxsw_sp_rt6);
4337}
4338
4339static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
4340{
4341 /* RTF_CACHE routes are ignored */
4342 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
4343}
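/* As a hedged example, a route added with something like
 * "ip -6 route append 2001:db8::/64 via fe80::1 dev swp1" (swp1 being an
 * arbitrary port name) has RTF_GATEWAY set and RTF_ADDRCONF clear, so it
 * may be merged into a multipath entry, while an RA-learned default route
 * (RTF_ADDRCONF | RTF_GATEWAY) may not.
 */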
4344
4345static struct rt6_info *
4346mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
4347{
4348 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
4349 list)->rt;
4350}
4351
4352static struct mlxsw_sp_fib6_entry *
4353mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004354 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004355{
4356 struct mlxsw_sp_fib6_entry *fib6_entry;
4357
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004358 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004359 return NULL;
4360
4361 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4362 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4363
4364 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
4365 * virtual router.
4366 */
4367 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4368 continue;
4369 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4370 break;
4371 if (rt->rt6i_metric < nrt->rt6i_metric)
4372 continue;
4373 if (rt->rt6i_metric == nrt->rt6i_metric &&
4374 mlxsw_sp_fib6_rt_can_mp(rt))
4375 return fib6_entry;
4376 if (rt->rt6i_metric > nrt->rt6i_metric)
4377 break;
4378 }
4379
4380 return NULL;
4381}
4382
4383static struct mlxsw_sp_rt6 *
4384mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
4385 const struct rt6_info *rt)
4386{
4387 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4388
4389 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
4390 if (mlxsw_sp_rt6->rt == rt)
4391 return mlxsw_sp_rt6;
4392 }
4393
4394 return NULL;
4395}
4396
Petr Machata8f28a302017-09-02 23:49:24 +02004397static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4398 const struct rt6_info *rt,
4399 enum mlxsw_sp_ipip_type *ret)
4400{
4401 return rt->dst.dev &&
4402 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
4403}
4404
Petr Machata35225e42017-09-02 23:49:22 +02004405static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4406 struct mlxsw_sp_nexthop_group *nh_grp,
4407 struct mlxsw_sp_nexthop *nh,
4408 const struct rt6_info *rt)
Ido Schimmel428b8512017-08-03 13:28:28 +02004409{
Petr Machata8f28a302017-09-02 23:49:24 +02004410 struct mlxsw_sp_router *router = mlxsw_sp->router;
Ido Schimmel428b8512017-08-03 13:28:28 +02004411 struct net_device *dev = rt->dst.dev;
Petr Machata8f28a302017-09-02 23:49:24 +02004412 enum mlxsw_sp_ipip_type ipipt;
Ido Schimmel428b8512017-08-03 13:28:28 +02004413 struct mlxsw_sp_rif *rif;
4414 int err;
4415
Petr Machata8f28a302017-09-02 23:49:24 +02004416 if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
4417 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4418 MLXSW_SP_L3_PROTO_IPV6)) {
4419 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02004420 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02004421 if (err)
4422 return err;
4423 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4424 return 0;
Petr Machata8f28a302017-09-02 23:49:24 +02004425 }
4426
Petr Machata35225e42017-09-02 23:49:22 +02004427 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
Ido Schimmel428b8512017-08-03 13:28:28 +02004428 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4429 if (!rif)
4430 return 0;
4431 mlxsw_sp_nexthop_rif_init(nh, rif);
4432
4433 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4434 if (err)
4435 goto err_nexthop_neigh_init;
4436
4437 return 0;
4438
4439err_nexthop_neigh_init:
4440 mlxsw_sp_nexthop_rif_fini(nh);
4441 return err;
4442}
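/* The function above classifies an IPv6 nexthop as either IPIP (the
 * route's device is a tunnel the driver can offload, so the nexthop is
 * bound to the tunnel's loopback RIF) or ETH (a regular router interface
 * plus a neighbour entry that still has to be resolved). This mirrors the
 * corresponding IPv4 nexthop handling earlier in the file.
 */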
4443
Petr Machata35225e42017-09-02 23:49:22 +02004444static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
4445 struct mlxsw_sp_nexthop *nh)
4446{
4447 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4448}
4449
4450static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
4451 struct mlxsw_sp_nexthop_group *nh_grp,
4452 struct mlxsw_sp_nexthop *nh,
4453 const struct rt6_info *rt)
4454{
4455 struct net_device *dev = rt->dst.dev;
4456
4457 nh->nh_grp = nh_grp;
Ido Schimmel408bd942017-10-22 23:11:46 +02004458 nh->nh_weight = 1;
Petr Machata35225e42017-09-02 23:49:22 +02004459 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004460 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Petr Machata35225e42017-09-02 23:49:22 +02004461
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004462 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4463
Petr Machata35225e42017-09-02 23:49:22 +02004464 if (!dev)
4465 return 0;
4466 nh->ifindex = dev->ifindex;
4467
4468 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
4469}
4470
Ido Schimmel428b8512017-08-03 13:28:28 +02004471static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
4472 struct mlxsw_sp_nexthop *nh)
4473{
Petr Machata35225e42017-09-02 23:49:22 +02004474 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02004475 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02004476 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmel428b8512017-08-03 13:28:28 +02004477}
4478
Petr Machataf6050ee2017-09-02 23:49:21 +02004479static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4480 const struct rt6_info *rt)
4481{
Petr Machata8f28a302017-09-02 23:49:24 +02004482 return rt->rt6i_flags & RTF_GATEWAY ||
4483 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
Petr Machataf6050ee2017-09-02 23:49:21 +02004484}
4485
Ido Schimmel428b8512017-08-03 13:28:28 +02004486static struct mlxsw_sp_nexthop_group *
4487mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
4488 struct mlxsw_sp_fib6_entry *fib6_entry)
4489{
4490 struct mlxsw_sp_nexthop_group *nh_grp;
4491 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4492 struct mlxsw_sp_nexthop *nh;
4493 size_t alloc_size;
4494 int i = 0;
4495 int err;
4496
4497 alloc_size = sizeof(*nh_grp) +
4498 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
4499 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
4500 if (!nh_grp)
4501 return ERR_PTR(-ENOMEM);
4502 INIT_LIST_HEAD(&nh_grp->fib_list);
4503#if IS_ENABLED(CONFIG_IPV6)
4504 nh_grp->neigh_tbl = &nd_tbl;
4505#endif
4506 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
4507 struct mlxsw_sp_rt6, list);
Petr Machataf6050ee2017-09-02 23:49:21 +02004508 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004509 nh_grp->count = fib6_entry->nrt6;
4510 for (i = 0; i < nh_grp->count; i++) {
4511 struct rt6_info *rt = mlxsw_sp_rt6->rt;
4512
4513 nh = &nh_grp->nexthops[i];
4514 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
4515 if (err)
4516 goto err_nexthop6_init;
4517 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
4518 }
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004519
4520 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4521 if (err)
4522 goto err_nexthop_group_insert;
4523
Ido Schimmel428b8512017-08-03 13:28:28 +02004524 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4525 return nh_grp;
4526
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004527err_nexthop_group_insert:
Ido Schimmel428b8512017-08-03 13:28:28 +02004528err_nexthop6_init:
4529 for (i--; i >= 0; i--) {
4530 nh = &nh_grp->nexthops[i];
4531 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4532 }
4533 kfree(nh_grp);
4534 return ERR_PTR(err);
4535}
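/* The group is allocated with a trailing array of fib6_entry->nrt6
 * nexthops, one per route in the multipath entry. A hedged equivalent of
 * the size computation, using the struct_size() helper from later kernels
 * and assuming 'nexthops' is the trailing array member:
 *
 *	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
 *			 GFP_KERNEL);
 */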
4536
4537static void
4538mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
4539 struct mlxsw_sp_nexthop_group *nh_grp)
4540{
4541 struct mlxsw_sp_nexthop *nh;
4542 int i = nh_grp->count;
4543
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004544 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Ido Schimmel428b8512017-08-03 13:28:28 +02004545 for (i--; i >= 0; i--) {
4546 nh = &nh_grp->nexthops[i];
4547 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
4548 }
4549 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4550 WARN_ON(nh_grp->adj_index_valid);
4551 kfree(nh_grp);
4552}
4553
4554static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
4555 struct mlxsw_sp_fib6_entry *fib6_entry)
4556{
4557 struct mlxsw_sp_nexthop_group *nh_grp;
4558
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02004559 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
4560 if (!nh_grp) {
4561 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
4562 if (IS_ERR(nh_grp))
4563 return PTR_ERR(nh_grp);
4564 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004565
4566 list_add_tail(&fib6_entry->common.nexthop_group_node,
4567 &nh_grp->fib_list);
4568 fib6_entry->common.nh_group = nh_grp;
4569
4570 return 0;
4571}
4572
4573static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
4574 struct mlxsw_sp_fib_entry *fib_entry)
4575{
4576 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4577
4578 list_del(&fib_entry->nexthop_group_node);
4579 if (!list_empty(&nh_grp->fib_list))
4580 return;
4581 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
4582}
4583
4584static int
4585mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
4586 struct mlxsw_sp_fib6_entry *fib6_entry)
4587{
4588 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
4589 int err;
4590
4591 fib6_entry->common.nh_group = NULL;
4592 list_del(&fib6_entry->common.nexthop_group_node);
4593
4594 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4595 if (err)
4596 goto err_nexthop6_group_get;
4597
4598	/* If this entry is offloaded, the adjacency index currently
4599	 * associated with it in the device's table is that of the old
4600	 * group. Start using the new one instead.
4601	 */
4602 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4603 if (err)
4604 goto err_fib_node_entry_add;
4605
4606 if (list_empty(&old_nh_grp->fib_list))
4607 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
4608
4609 return 0;
4610
4611err_fib_node_entry_add:
4612 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4613err_nexthop6_group_get:
4614 list_add_tail(&fib6_entry->common.nexthop_group_node,
4615 &old_nh_grp->fib_list);
4616 fib6_entry->common.nh_group = old_nh_grp;
4617 return err;
4618}
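/* A rough sketch of the group swap above for an entry that is already
 * offloaded: the entry is detached from its old group, attached to a new
 * (or existing matching) group, and the LPM entry is then rewritten so
 * the device starts using the new adjacency index:
 *
 *	mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
 *	mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
 *	if (list_empty(&old_nh_grp->fib_list))
 *		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
 *
 * The error path re-attaches the old group, so software and hardware
 * state stay consistent.
 */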
4619
4620static int
4621mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
4622 struct mlxsw_sp_fib6_entry *fib6_entry,
4623 struct rt6_info *rt)
4624{
4625 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4626 int err;
4627
4628 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4629 if (IS_ERR(mlxsw_sp_rt6))
4630 return PTR_ERR(mlxsw_sp_rt6);
4631
4632 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4633 fib6_entry->nrt6++;
4634
4635 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4636 if (err)
4637 goto err_nexthop6_group_update;
4638
4639 return 0;
4640
4641err_nexthop6_group_update:
4642 fib6_entry->nrt6--;
4643 list_del(&mlxsw_sp_rt6->list);
4644 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4645 return err;
4646}
4647
4648static void
4649mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
4650 struct mlxsw_sp_fib6_entry *fib6_entry,
4651 struct rt6_info *rt)
4652{
4653 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4654
4655 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
4656 if (WARN_ON(!mlxsw_sp_rt6))
4657 return;
4658
4659 fib6_entry->nrt6--;
4660 list_del(&mlxsw_sp_rt6->list);
4661 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
4662 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4663}
4664
Petr Machataf6050ee2017-09-02 23:49:21 +02004665static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4666 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel428b8512017-08-03 13:28:28 +02004667 const struct rt6_info *rt)
4668{
4669 /* Packets hitting RTF_REJECT routes need to be discarded by the
4670 * stack. We can rely on their destination device not having a
4671 * RIF (it's the loopback device) and can thus use action type
4672 * local, which will cause them to be trapped with a lower
4673 * priority than packets that need to be locally received.
4674 */
Ido Schimmeld3b6d372017-09-01 10:58:55 +02004675 if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
Ido Schimmel428b8512017-08-03 13:28:28 +02004676 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4677 else if (rt->rt6i_flags & RTF_REJECT)
4678 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Petr Machataf6050ee2017-09-02 23:49:21 +02004679 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
Ido Schimmel428b8512017-08-03 13:28:28 +02004680 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4681 else
4682 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4683}
4684
4685static void
4686mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
4687{
4688 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
4689
4690 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
4691 list) {
4692 fib6_entry->nrt6--;
4693 list_del(&mlxsw_sp_rt6->list);
4694 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4695 }
4696}
4697
4698static struct mlxsw_sp_fib6_entry *
4699mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
4700 struct mlxsw_sp_fib_node *fib_node,
4701 struct rt6_info *rt)
4702{
4703 struct mlxsw_sp_fib6_entry *fib6_entry;
4704 struct mlxsw_sp_fib_entry *fib_entry;
4705 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4706 int err;
4707
4708 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
4709 if (!fib6_entry)
4710 return ERR_PTR(-ENOMEM);
4711 fib_entry = &fib6_entry->common;
4712
4713 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
4714 if (IS_ERR(mlxsw_sp_rt6)) {
4715 err = PTR_ERR(mlxsw_sp_rt6);
4716 goto err_rt6_create;
4717 }
4718
Petr Machataf6050ee2017-09-02 23:49:21 +02004719 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);
Ido Schimmel428b8512017-08-03 13:28:28 +02004720
4721 INIT_LIST_HEAD(&fib6_entry->rt6_list);
4722 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
4723 fib6_entry->nrt6 = 1;
4724 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
4725 if (err)
4726 goto err_nexthop6_group_get;
4727
4728 fib_entry->fib_node = fib_node;
4729
4730 return fib6_entry;
4731
4732err_nexthop6_group_get:
4733 list_del(&mlxsw_sp_rt6->list);
4734 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
4735err_rt6_create:
4736 kfree(fib6_entry);
4737 return ERR_PTR(err);
4738}
4739
4740static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4741 struct mlxsw_sp_fib6_entry *fib6_entry)
4742{
4743 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
4744 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
4745 WARN_ON(fib6_entry->nrt6);
4746 kfree(fib6_entry);
4747}
4748
4749static struct mlxsw_sp_fib6_entry *
4750mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004751 const struct rt6_info *nrt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004752{
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004753 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004754
4755 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4756 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4757
4758 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
4759 continue;
4760 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
4761 break;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004762 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
4763 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
4764 mlxsw_sp_fib6_rt_can_mp(nrt))
4765 return fib6_entry;
4766 if (mlxsw_sp_fib6_rt_can_mp(nrt))
4767 fallback = fallback ?: fib6_entry;
4768 }
Ido Schimmel428b8512017-08-03 13:28:28 +02004769 if (rt->rt6i_metric > nrt->rt6i_metric)
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004770 return fallback ?: fib6_entry;
Ido Schimmel428b8512017-08-03 13:28:28 +02004771 }
4772
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004773 return fallback;
Ido Schimmel428b8512017-08-03 13:28:28 +02004774}
4775
4776static int
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004777mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
4778 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004779{
4780 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
4781 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
4782 struct mlxsw_sp_fib6_entry *fib6_entry;
4783
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004784 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
4785
4786 if (replace && WARN_ON(!fib6_entry))
4787 return -EINVAL;
Ido Schimmel428b8512017-08-03 13:28:28 +02004788
4789 if (fib6_entry) {
4790 list_add_tail(&new6_entry->common.list,
4791 &fib6_entry->common.list);
4792 } else {
4793 struct mlxsw_sp_fib6_entry *last;
4794
4795 list_for_each_entry(last, &fib_node->entry_list, common.list) {
4796 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
4797
4798 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
4799 break;
4800 fib6_entry = last;
4801 }
4802
4803 if (fib6_entry)
4804 list_add(&new6_entry->common.list,
4805 &fib6_entry->common.list);
4806 else
4807 list_add(&new6_entry->common.list,
4808 &fib_node->entry_list);
4809 }
4810
4811 return 0;
4812}
4813
4814static void
4815mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
4816{
4817 list_del(&fib6_entry->common.list);
4818}
4819
4820static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004821 struct mlxsw_sp_fib6_entry *fib6_entry,
4822 bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004823{
4824 int err;
4825
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004826 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004827 if (err)
4828 return err;
4829
4830 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
4831 if (err)
4832 goto err_fib_node_entry_add;
4833
4834 return 0;
4835
4836err_fib_node_entry_add:
4837 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4838 return err;
4839}
4840
4841static void
4842mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
4843 struct mlxsw_sp_fib6_entry *fib6_entry)
4844{
4845 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
4846 mlxsw_sp_fib6_node_list_remove(fib6_entry);
4847}
4848
4849static struct mlxsw_sp_fib6_entry *
4850mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4851 const struct rt6_info *rt)
4852{
4853 struct mlxsw_sp_fib6_entry *fib6_entry;
4854 struct mlxsw_sp_fib_node *fib_node;
4855 struct mlxsw_sp_fib *fib;
4856 struct mlxsw_sp_vr *vr;
4857
4858 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
4859 if (!vr)
4860 return NULL;
4861 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
4862
4863 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
4864 sizeof(rt->rt6i_dst.addr),
4865 rt->rt6i_dst.plen);
4866 if (!fib_node)
4867 return NULL;
4868
4869 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
4870 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
4871
4872 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
4873 rt->rt6i_metric == iter_rt->rt6i_metric &&
4874 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
4875 return fib6_entry;
4876 }
4877
4878 return NULL;
4879}
4880
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004881static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
4882 struct mlxsw_sp_fib6_entry *fib6_entry,
4883 bool replace)
4884{
4885 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
4886 struct mlxsw_sp_fib6_entry *replaced;
4887
4888 if (!replace)
4889 return;
4890
4891 replaced = list_next_entry(fib6_entry, common.list);
4892
4893 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
4894 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
4895 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4896}
4897
Ido Schimmel428b8512017-08-03 13:28:28 +02004898static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004899 struct rt6_info *rt, bool replace)
Ido Schimmel428b8512017-08-03 13:28:28 +02004900{
4901 struct mlxsw_sp_fib6_entry *fib6_entry;
4902 struct mlxsw_sp_fib_node *fib_node;
4903 int err;
4904
4905 if (mlxsw_sp->router->aborted)
4906 return 0;
4907
Ido Schimmelf36f5ac2017-08-03 13:28:30 +02004908 if (rt->rt6i_src.plen)
4909 return -EINVAL;
4910
Ido Schimmel428b8512017-08-03 13:28:28 +02004911 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4912 return 0;
4913
4914 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
4915 &rt->rt6i_dst.addr,
4916 sizeof(rt->rt6i_dst.addr),
4917 rt->rt6i_dst.plen,
4918 MLXSW_SP_L3_PROTO_IPV6);
4919 if (IS_ERR(fib_node))
4920 return PTR_ERR(fib_node);
4921
4922 /* Before creating a new entry, try to append route to an existing
4923 * multipath entry.
4924 */
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004925 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004926 if (fib6_entry) {
4927 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
4928 if (err)
4929 goto err_fib6_entry_nexthop_add;
4930 return 0;
4931 }
4932
4933 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
4934 if (IS_ERR(fib6_entry)) {
4935 err = PTR_ERR(fib6_entry);
4936 goto err_fib6_entry_create;
4937 }
4938
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004939 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02004940 if (err)
4941 goto err_fib6_node_entry_link;
4942
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02004943 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
4944
Ido Schimmel428b8512017-08-03 13:28:28 +02004945 return 0;
4946
4947err_fib6_node_entry_link:
4948 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4949err_fib6_entry_create:
4950err_fib6_entry_nexthop_add:
4951 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4952 return err;
4953}
4954
4955static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
4956 struct rt6_info *rt)
4957{
4958 struct mlxsw_sp_fib6_entry *fib6_entry;
4959 struct mlxsw_sp_fib_node *fib_node;
4960
4961 if (mlxsw_sp->router->aborted)
4962 return;
4963
4964 if (mlxsw_sp_fib6_rt_should_ignore(rt))
4965 return;
4966
4967 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
4968 if (WARN_ON(!fib6_entry))
4969 return;
4970
4971	/* If the route is part of a multipath entry, but is not the last
4972	 * one being removed, then only shrink its nexthop group.
4973	 */
4974 if (!list_is_singular(&fib6_entry->rt6_list)) {
4975 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
4976 return;
4977 }
4978
4979 fib_node = fib6_entry->common.fib_node;
4980
4981 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
4982 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
4983 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
4984}
4985
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004986static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
4987 enum mlxsw_reg_ralxx_protocol proto,
4988 u8 tree_id)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004989{
4990 char ralta_pl[MLXSW_REG_RALTA_LEN];
4991 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01004992 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004993
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004994 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02004995 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
4996 if (err)
4997 return err;
4998
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02004999 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005000 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
5001 if (err)
5002 return err;
5003
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005004 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005005 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005006 char raltb_pl[MLXSW_REG_RALTB_LEN];
5007 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005008
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005009 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005010 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
5011 raltb_pl);
5012 if (err)
5013 return err;
5014
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005015 mlxsw_reg_ralue_pack(ralue_pl, proto,
5016 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
Ido Schimmelb5d90e62017-03-10 08:53:43 +01005017 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
5018 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5019 ralue_pl);
5020 if (err)
5021 return err;
5022 }
5023
5024 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005025}
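/* A hedged summary of the register sequence above: RALTA allocates an LPM
 * tree for the protocol, RALST sets up its structure, RALTB binds every
 * virtual router to that tree, and a catch-all RALUE entry (prefix length
 * zero) with an ip2me action is written for each VR, so that all routed
 * packets of that protocol are trapped to the CPU once offloading has
 * been aborted.
 */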
5026
Yotam Gigid42b0962017-09-27 08:23:20 +02005027static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5028 struct mfc_entry_notifier_info *men_info,
5029 bool replace)
5030{
5031 struct mlxsw_sp_vr *vr;
5032
5033 if (mlxsw_sp->router->aborted)
5034 return 0;
5035
David Ahernf8fa9b42017-10-18 09:56:56 -07005036 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005037 if (IS_ERR(vr))
5038 return PTR_ERR(vr);
5039
5040 return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
5041}
5042
5043static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5044 struct mfc_entry_notifier_info *men_info)
5045{
5046 struct mlxsw_sp_vr *vr;
5047
5048 if (mlxsw_sp->router->aborted)
5049 return;
5050
5051 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5052 if (WARN_ON(!vr))
5053 return;
5054
5055 mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
5056 mlxsw_sp_vr_put(vr);
5057}
5058
5059static int
5060mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
5061 struct vif_entry_notifier_info *ven_info)
5062{
5063 struct mlxsw_sp_rif *rif;
5064 struct mlxsw_sp_vr *vr;
5065
5066 if (mlxsw_sp->router->aborted)
5067 return 0;
5068
David Ahernf8fa9b42017-10-18 09:56:56 -07005069 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
Yotam Gigid42b0962017-09-27 08:23:20 +02005070 if (IS_ERR(vr))
5071 return PTR_ERR(vr);
5072
5073 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
5074 return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
5075 ven_info->vif_index,
5076 ven_info->vif_flags, rif);
5077}
5078
5079static void
5080mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
5081 struct vif_entry_notifier_info *ven_info)
5082{
5083 struct mlxsw_sp_vr *vr;
5084
5085 if (mlxsw_sp->router->aborted)
5086 return;
5087
5088 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
5089 if (WARN_ON(!vr))
5090 return;
5091
5092 mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
5093 mlxsw_sp_vr_put(vr);
5094}
5095
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005096static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
5097{
5098 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
5099 int err;
5100
5101 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5102 MLXSW_SP_LPM_TREE_MIN);
5103 if (err)
5104 return err;
5105
Yotam Gigid42b0962017-09-27 08:23:20 +02005106	/* The multicast router code does not need an abort trap since, by
5107	 * default, packets that do not match any route are trapped to the CPU.
5108	 */
5109
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005110 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
5111 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
5112 MLXSW_SP_LPM_TREE_MIN + 1);
5113}
5114
Ido Schimmel9aecce12017-02-09 10:28:42 +01005115static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
5116 struct mlxsw_sp_fib_node *fib_node)
5117{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005118 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005119
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005120 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
5121 common.list) {
5122 bool do_break = &tmp->common.list == &fib_node->entry_list;
Ido Schimmel9aecce12017-02-09 10:28:42 +01005123
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02005124 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
5125 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
Ido Schimmel731ea1c2017-07-18 10:10:21 +02005126 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005127 /* Break when entry list is empty and node was freed.
5128 * Otherwise, we'll access freed memory in the next
5129 * iteration.
5130 */
5131 if (do_break)
5132 break;
5133 }
5134}
5135
Ido Schimmel428b8512017-08-03 13:28:28 +02005136static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
5137 struct mlxsw_sp_fib_node *fib_node)
5138{
5139 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
5140
5141 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
5142 common.list) {
5143 bool do_break = &tmp->common.list == &fib_node->entry_list;
5144
5145 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
5146 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5147 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5148 if (do_break)
5149 break;
5150 }
5151}
5152
Ido Schimmel9aecce12017-02-09 10:28:42 +01005153static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
5154 struct mlxsw_sp_fib_node *fib_node)
5155{
Ido Schimmel76610eb2017-03-10 08:53:41 +01005156 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01005157 case MLXSW_SP_L3_PROTO_IPV4:
5158 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
5159 break;
5160 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02005161 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01005162 break;
5163 }
5164}
5165
Ido Schimmel76610eb2017-03-10 08:53:41 +01005166static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
5167 struct mlxsw_sp_vr *vr,
5168 enum mlxsw_sp_l3proto proto)
5169{
5170 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
5171 struct mlxsw_sp_fib_node *fib_node, *tmp;
5172
5173 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
5174 bool do_break = &tmp->list == &fib->node_list;
5175
5176 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
5177 if (do_break)
5178 break;
5179 }
5180}
5181
Ido Schimmelac571de2016-11-14 11:26:32 +01005182static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005183{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005184 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005185
Jiri Pirkoc1a38312016-10-21 16:07:23 +02005186 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02005187 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01005188
Ido Schimmel76610eb2017-03-10 08:53:41 +01005189 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005190 continue;
Yotam Gigid42b0962017-09-27 08:23:20 +02005191
5192 mlxsw_sp_mr_table_flush(vr->mr4_table);
Ido Schimmel76610eb2017-03-10 08:53:41 +01005193 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Ido Schimmela3d9bc52017-07-18 10:10:22 +02005194
5195 /* If virtual router was only used for IPv4, then it's no
5196 * longer used.
5197 */
5198 if (!mlxsw_sp_vr_is_used(vr))
5199 continue;
5200 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005201 }
Ido Schimmelac571de2016-11-14 11:26:32 +01005202}
5203
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005204static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
Ido Schimmelac571de2016-11-14 11:26:32 +01005205{
5206 int err;
5207
Ido Schimmel9011b672017-05-16 19:38:25 +02005208 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01005209 return;
5210 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01005211 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02005212 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005213 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
5214 if (err)
5215 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
5216}
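/* Abort is a one-way switch for the lifetime of the router instance. It is
 * triggered when offloading a route fails (for example because the
 * hardware routing tables are exhausted) or when an unsupported FIB rule
 * is configured. All offloaded routes are then flushed and the catch-all
 * trap entries installed above take over, so routing continues in
 * software.
 */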
5217
Ido Schimmel30572242016-12-03 16:45:01 +01005218struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005219 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005220 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005221 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005222 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005223 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005224 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005225 struct mfc_entry_notifier_info men_info;
5226 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005227 };
Ido Schimmel30572242016-12-03 16:45:01 +01005228 struct mlxsw_sp *mlxsw_sp;
5229 unsigned long event;
5230};
5231
Ido Schimmel66a57632017-08-03 13:28:26 +02005232static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005233{
Ido Schimmel30572242016-12-03 16:45:01 +01005234 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005235 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005236 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005237 struct fib_rule *rule;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005238 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005239 int err;
5240
Ido Schimmel30572242016-12-03 16:45:01 +01005241 /* Protect internal structures from changes */
5242 rtnl_lock();
5243 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005244 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005245 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005246 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005247 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005248 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5249 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005250 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005251 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005252 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005253 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005254 break;
5255 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005256 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5257 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005258 break;
5259 case FIB_EVENT_RULE_ADD: /* fall through */
5260 case FIB_EVENT_RULE_DEL:
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005261 rule = fib_work->fr_info.rule;
Ido Schimmelc7f6e662017-03-16 09:08:20 +01005262 if (!fib4_rule_default(rule) && !rule->l3mdev)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005263 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005264 fib_rule_put(rule);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005265 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005266 case FIB_EVENT_NH_ADD: /* fall through */
5267 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005268 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5269 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005270 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5271 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005272 }
Ido Schimmel30572242016-12-03 16:45:01 +01005273 rtnl_unlock();
5274 kfree(fib_work);
5275}
5276
Ido Schimmel66a57632017-08-03 13:28:26 +02005277static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5278{
Ido Schimmel583419f2017-08-03 13:28:27 +02005279 struct mlxsw_sp_fib_event_work *fib_work =
5280 container_of(work, struct mlxsw_sp_fib_event_work, work);
5281 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5282 struct fib_rule *rule;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005283 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005284 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005285
5286 rtnl_lock();
5287 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005288 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005289 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005290 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005291 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005292 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005293 if (err)
5294 mlxsw_sp_router_fib_abort(mlxsw_sp);
5295 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5296 break;
5297 case FIB_EVENT_ENTRY_DEL:
5298 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5299 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5300 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005301 case FIB_EVENT_RULE_ADD: /* fall through */
5302 case FIB_EVENT_RULE_DEL:
5303 rule = fib_work->fr_info.rule;
5304 if (!fib6_rule_default(rule) && !rule->l3mdev)
5305 mlxsw_sp_router_fib_abort(mlxsw_sp);
5306 fib_rule_put(rule);
5307 break;
5308 }
5309 rtnl_unlock();
5310 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005311}
5312
Yotam Gigid42b0962017-09-27 08:23:20 +02005313static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5314{
5315 struct mlxsw_sp_fib_event_work *fib_work =
5316 container_of(work, struct mlxsw_sp_fib_event_work, work);
5317 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
5318 struct fib_rule *rule;
5319 bool replace;
5320 int err;
5321
5322 rtnl_lock();
5323 switch (fib_work->event) {
5324 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5325 case FIB_EVENT_ENTRY_ADD:
5326 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5327
5328 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5329 replace);
5330 if (err)
5331 mlxsw_sp_router_fib_abort(mlxsw_sp);
5332 ipmr_cache_put(fib_work->men_info.mfc);
5333 break;
5334 case FIB_EVENT_ENTRY_DEL:
5335 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5336 ipmr_cache_put(fib_work->men_info.mfc);
5337 break;
5338 case FIB_EVENT_VIF_ADD:
5339 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5340 &fib_work->ven_info);
5341 if (err)
5342 mlxsw_sp_router_fib_abort(mlxsw_sp);
5343 dev_put(fib_work->ven_info.dev);
5344 break;
5345 case FIB_EVENT_VIF_DEL:
5346 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5347 &fib_work->ven_info);
5348 dev_put(fib_work->ven_info.dev);
5349 break;
5350 case FIB_EVENT_RULE_ADD: /* fall through */
5351 case FIB_EVENT_RULE_DEL:
5352 rule = fib_work->fr_info.rule;
5353 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5354 mlxsw_sp_router_fib_abort(mlxsw_sp);
5355 fib_rule_put(rule);
5356 break;
5357 }
5358 rtnl_unlock();
5359 kfree(fib_work);
5360}
5361
Ido Schimmel66a57632017-08-03 13:28:26 +02005362static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5363 struct fib_notifier_info *info)
5364{
David Ahern3c75f9b2017-10-18 15:01:38 -07005365 struct fib_entry_notifier_info *fen_info;
5366 struct fib_rule_notifier_info *fr_info;
5367 struct fib_nh_notifier_info *fnh_info;
5368
Ido Schimmel66a57632017-08-03 13:28:26 +02005369 switch (fib_work->event) {
5370 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5371 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5372 case FIB_EVENT_ENTRY_ADD: /* fall through */
5373 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005374 fen_info = container_of(info, struct fib_entry_notifier_info,
5375 info);
5376 fib_work->fen_info = *fen_info;
5377	/* Take a reference on the fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005378	 * freed while the work is queued. Release it afterwards.
5379 */
5380 fib_info_hold(fib_work->fen_info.fi);
5381 break;
5382 case FIB_EVENT_RULE_ADD: /* fall through */
5383 case FIB_EVENT_RULE_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005384 fr_info = container_of(info, struct fib_rule_notifier_info,
5385 info);
5386 fib_work->fr_info = *fr_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005387 fib_rule_get(fib_work->fr_info.rule);
5388 break;
5389 case FIB_EVENT_NH_ADD: /* fall through */
5390 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005391 fnh_info = container_of(info, struct fib_nh_notifier_info,
5392 info);
5393 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005394 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5395 break;
5396 }
5397}
5398
5399static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5400 struct fib_notifier_info *info)
5401{
David Ahern3c75f9b2017-10-18 15:01:38 -07005402 struct fib6_entry_notifier_info *fen6_info;
5403 struct fib_rule_notifier_info *fr_info;
5404
Ido Schimmel583419f2017-08-03 13:28:27 +02005405 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005406 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005407 case FIB_EVENT_ENTRY_ADD: /* fall through */
5408 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005409 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5410 info);
5411 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005412 rt6_hold(fib_work->fen6_info.rt);
5413 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005414 case FIB_EVENT_RULE_ADD: /* fall through */
5415 case FIB_EVENT_RULE_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005416 fr_info = container_of(info, struct fib_rule_notifier_info,
5417 info);
5418 fib_work->fr_info = *fr_info;
Ido Schimmel583419f2017-08-03 13:28:27 +02005419 fib_rule_get(fib_work->fr_info.rule);
5420 break;
5421 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005422}
5423
Yotam Gigid42b0962017-09-27 08:23:20 +02005424static void
5425mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5426 struct fib_notifier_info *info)
5427{
5428 switch (fib_work->event) {
5429 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5430 case FIB_EVENT_ENTRY_ADD: /* fall through */
5431 case FIB_EVENT_ENTRY_DEL:
5432 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5433 ipmr_cache_hold(fib_work->men_info.mfc);
5434 break;
5435 case FIB_EVENT_VIF_ADD: /* fall through */
5436 case FIB_EVENT_VIF_DEL:
5437 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5438 dev_hold(fib_work->ven_info.dev);
5439 break;
5440 case FIB_EVENT_RULE_ADD: /* fall through */
5441 case FIB_EVENT_RULE_DEL:
5442 memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
5443 fib_rule_get(fib_work->fr_info.rule);
5444 break;
5445 }
5446}
5447
Ido Schimmel30572242016-12-03 16:45:01 +01005448/* Called with rcu_read_lock() */
5449static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5450 unsigned long event, void *ptr)
5451{
Ido Schimmel30572242016-12-03 16:45:01 +01005452 struct mlxsw_sp_fib_event_work *fib_work;
5453 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005454 struct mlxsw_sp_router *router;
Ido Schimmel30572242016-12-03 16:45:01 +01005455
Ido Schimmel8e29f972017-09-15 15:31:07 +02005456 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005457 (info->family != AF_INET && info->family != AF_INET6 &&
5458 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005459 return NOTIFY_DONE;
5460
5461 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5462 if (WARN_ON(!fib_work))
5463 return NOTIFY_BAD;
5464
Ido Schimmel7e39d112017-05-16 19:38:28 +02005465 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5466 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005467 fib_work->event = event;
5468
Ido Schimmel66a57632017-08-03 13:28:26 +02005469 switch (info->family) {
5470 case AF_INET:
5471 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5472 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005473 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005474 case AF_INET6:
5475 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5476 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005477 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005478 case RTNL_FAMILY_IPMR:
5479 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5480 mlxsw_sp_router_fibmr_event(fib_work, info);
5481 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005482 }
5483
Ido Schimmela0e47612017-02-06 16:20:10 +01005484 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005485
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005486 return NOTIFY_DONE;
5487}
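/* The notifier above runs in an atomic context, so it only copies the
 * notifier info, takes the references it needs (fib_info, rt6_info, MFC
 * cache, netdevice or FIB rule) and schedules a work item; the per-family
 * *_event_work() handlers then run under rtnl_lock() and may sleep. A
 * hedged sketch of how the block is assumed to be registered elsewhere in
 * this file (the flush callback name is an assumption; it is defined
 * outside this excerpt):
 *
 *	router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
 *	err = register_fib_notifier(&router->fib_nb,
 *				    mlxsw_sp_router_fib_dump_flush);
 */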
5488
Ido Schimmel4724ba562017-03-10 08:53:39 +01005489static struct mlxsw_sp_rif *
5490mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5491 const struct net_device *dev)
5492{
5493 int i;
5494
5495 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005496 if (mlxsw_sp->router->rifs[i] &&
5497 mlxsw_sp->router->rifs[i]->dev == dev)
5498 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005499
5500 return NULL;
5501}
5502
5503static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5504{
5505 char ritr_pl[MLXSW_REG_RITR_LEN];
5506 int err;
5507
5508 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5509 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5510 if (WARN_ON_ONCE(err))
5511 return err;
5512
5513 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5514 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5515}
5516
5517static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005518 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005519{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005520 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5521 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5522 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005523}
5524
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		idev = __in_dev_get_rtnl(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
		return MLXSW_SP_RIF_TYPE_IPIP_LB;

	/* Otherwise RIF type is derived from the type of the underlying FID. */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

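/* Pick the first unused entry in the RIF table. */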
static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}

u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_vr_id;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}

const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}

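/* Create a RIF for @params->dev: bind it to a virtual router, allocate a free
 * RIF index, take a FID reference when the RIF type uses one and let the
 * type-specific ops program the hardware.
 */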
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	if (ops->fid_get) {
		fid = ops->fid_get(rif);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
	if (err)
		goto err_mr_rif_add;

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp->router->rifs[rif_index] = rif;

	return rif;

err_mr_rif_add:
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_rif_counters_free(rif);
	mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(vr);
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

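/* Make a {port, VID} a router port: reuse or create the Sub-port RIF backing
 * @l3_dev, map the {port, VID} to the RIF's FID, disable learning and put the
 * VLAN into forwarding state.
 */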
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}

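/* The following handlers configure or destroy RIFs in response to inetaddr
 * events, keyed by the type of netdevice carrying the address: a physical
 * port, a LAG, a bridge or a VLAN upper of one of these.
 */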
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev, extack);
	case NETDEV_DOWN:
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
						 extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
					     extack);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
	else
		return 0;
}

int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		goto out;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	return notifier_from_errno(err);
}

int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
out:
	return notifier_from_errno(err);
}

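/* IPv6 address notifications are delivered in atomic context, so the actual
 * RIF handling is deferred to a work item that takes RTNL and reuses the
 * common inetaddr event handling.
 */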
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

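/* Sync a RIF with its netdevice's current MAC address and MTU: move the
 * router FDB entry to the new address and re-program the RIF with the new
 * values.
 */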
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

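/* Sub-port RIFs: router interfaces configured on top of a {port, VID} or
 * {LAG, VID} and backed by an rFID.
 */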
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

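/* VLAN and FID RIFs are both programmed through mlxsw_sp_rif_vlan_fid_op();
 * mlxsw_sp_router_port() returns the port number one past the maximal port
 * count, used as the flood destination towards the router for broadcast and
 * multicast.
 */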
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};

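/* Loopback RIFs used for IP-in-IP underlay. They are bound to the underlay
 * virtual router and, unlike the other RIF types, provide no fid_get
 * operation, so no FID is associated with them.
 */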
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
			struct mlxsw_sp_vr *ul_vr, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr->id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int
mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(ul_vr);
	return err;
}

static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

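/* Enable the router in hardware and cap the number of router interfaces via
 * the RGCR register.
 */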
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}