/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct list_head nexthop_list;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	struct list_head ipip_list;
	bool aborted;
	struct notifier_block fib_nb;
	struct notifier_block netevent_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

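/* Enable or disable binding of a flow counter to a RIF in the given
 * direction. The current RITR configuration is queried first and written back
 * with only the counter fields changed, so the rest of the RIF setup is left
 * untouched.
 */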
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

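/* Illustrative use of the RIF counter API above (a sketch, not part of the
 * driver; the in-tree consumer is the dpipe eRIF table code). A caller
 * allocates a counter for a direction, reads it, and eventually frees it:
 *
 *	u64 cnt;
 *
 *	if (!mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
 *					MLXSW_SP_RIF_COUNTER_EGRESS)) {
 *		mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					       MLXSW_SP_RIF_COUNTER_EGRESS,
 *					       &cnt);
 *		mlxsw_sp_rif_counter_free(mlxsw_sp, rif,
 *					  MLXSW_SP_RIF_COUNTER_EGRESS);
 *	}
 */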
static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
};

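/* As an illustration: for an offloaded tunnel whose local (underlay) address
 * is, say, 192.0.2.1, the local /32 route covering 192.0.2.1 would normally
 * be offloaded as a TRAP entry; once the tunnel can be offloaded, that route
 * is promoted to an IPIP_DECAP entry (see
 * mlxsw_sp_ipip_entry_promote_decap() below) and demoted back to TRAP when
 * the tunnel goes down or away.
 */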
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr4_table;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

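/* Program the tree structure (RALST): each used prefix length is packed as a
 * bin whose left child is the previously packed, shorter used prefix length,
 * and the longest used prefix length becomes the root bin; prefix length
 * zero is skipped.
 */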
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

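/* Get a tree whose prefix usage exactly matches the requested one: reuse an
 * already programmed tree when possible, otherwise allocate and program a new
 * one. Reference counting is handled separately via mlxsw_sp_lpm_tree_hold()
 * and mlxsw_sp_lpm_tree_put().
 */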
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			return lpm_tree;
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
						 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->mr4_table)) {
		err = PTR_ERR(vr->mr4_table);
		goto err_mr_table_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_mr_table_create:
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr4_table);
	vr->mr4_table = NULL;
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

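/* Look up the virtual router backing the given kernel table, creating it on
 * first use. There is no explicit reference count: a VR stays "used" as long
 * as its tables exist, and mlxsw_sp_vr_put() below only destroys it once no
 * RIFs are bound to it and all of its FIB and multicast tables are empty.
 */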
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr4_table))
		mlxsw_sp_vr_destroy(vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree && fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;
}

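/* Switch every virtual router currently bound to the old LPM tree of the
 * given FIB over to the new tree. If any re-bind fails, the VRs that were
 * already moved are rolled back to the old tree. When the FIB has no tree
 * yet, only this FIB's VR is bound to the new tree.
 */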
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	enum mlxsw_sp_l3proto proto = fib->proto;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	if (!old_tree)
		goto no_replace;
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;

no_replace:
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		return err;
	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	return 0;
}

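/* Collect into req_prefix_usage the union of all prefix lengths currently in
 * use by any active virtual router for the given protocol.
 */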
static void
mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp,
		      enum mlxsw_sp_l3proto proto,
		      struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
		unsigned char prefix;

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage)
			mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix);
	}
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return __dev_get_by_index(net, tun->parms.link);
}

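/* Determine the underlay routing table of a tunnel: the FIB table associated
 * with the bound (link) device if there is one (e.g. via its VRF), otherwise
 * the one associated with the tunnel device itself; either case falls back to
 * the main table.
 */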
static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);

	if (d)
		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
			       const union mlxsw_sp_l3addr *addr2)
{
	return !memcmp(addr1, addr2, sizeof(*addr1));
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_fib_entry *fib_entry;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node || list_empty(&fib_node->entry_list))
		return NULL;

	fib_entry = list_first_entry(&fib_node->entry_list,
				     struct mlxsw_sp_fib_entry, list);
	if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	struct net_device *ipip_ul_dev;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry) &&
	       (!ipip_ul_dev || ipip_ul_dev == ul_dev);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	enum mlxsw_sp_ipip_type ipipt;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap) {
		list_splice_init(&old_lb_rif->common.nexthop_list,
				 &new_lb_rif->common.nexthop_list);
		mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
	}

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

/* Update the offload related to an IPIP entry. This always updates decap, and
 * in addition to that it also:
 * @recreate_loopback: recreates the associated loopback RIF
 * @keep_encap: updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

1447/* The configuration where several tunnels have the same local address in the
1448 * same underlay table needs special treatment in the HW. That is currently not
1449 * implemented in the driver. This function finds and demotes the first tunnel
 1450 * with a given source address, except the one passed in as the argument
1451 * `except'.
1452 */
1453bool
1454mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1455 enum mlxsw_sp_l3proto ul_proto,
1456 union mlxsw_sp_l3addr saddr,
1457 u32 ul_tb_id,
1458 const struct mlxsw_sp_ipip_entry *except)
1459{
1460 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1461
1462 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1463 ipip_list_node) {
1464 if (ipip_entry != except &&
1465 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1466 ul_tb_id, ipip_entry)) {
1467 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1468 return true;
1469 }
1470 }
1471
1472 return false;
1473}
1474
Petr Machata7e75af62017-11-03 10:03:36 +01001475int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1476 struct net_device *ol_dev,
1477 unsigned long event,
1478 struct netdev_notifier_info *info)
Petr Machata00635872017-10-16 16:26:37 +02001479{
Petr Machata7e75af62017-11-03 10:03:36 +01001480 struct netdev_notifier_changeupper_info *chup;
1481 struct netlink_ext_ack *extack;
1482
Petr Machata00635872017-10-16 16:26:37 +02001483 switch (event) {
1484 case NETDEV_REGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001485 return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001486 case NETDEV_UNREGISTER:
Petr Machata796ec772017-11-03 10:03:29 +01001487 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001488 return 0;
1489 case NETDEV_UP:
Petr Machata6d4de442017-11-03 10:03:34 +01001490 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1491 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001492 case NETDEV_DOWN:
Petr Machata796ec772017-11-03 10:03:29 +01001493 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
Petr Machata00635872017-10-16 16:26:37 +02001494 return 0;
Petr Machataf63ce4e2017-10-16 16:26:38 +02001495 case NETDEV_CHANGEUPPER:
Petr Machata7e75af62017-11-03 10:03:36 +01001496 chup = container_of(info, typeof(*chup), info);
1497 extack = info->extack;
1498 if (netif_is_l3_master(chup->upper_dev))
Petr Machata796ec772017-11-03 10:03:29 +01001499 return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
Petr Machata7e75af62017-11-03 10:03:36 +01001500 ol_dev,
1501 extack);
Petr Machataf63ce4e2017-10-16 16:26:38 +02001502 return 0;
Petr Machata00635872017-10-16 16:26:37 +02001503 }
1504 return 0;
1505}
1506
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001507struct mlxsw_sp_neigh_key {
Jiri Pirko33b13412016-11-10 12:31:04 +01001508 struct neighbour *n;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001509};
1510
1511struct mlxsw_sp_neigh_entry {
Ido Schimmel9665b742017-02-08 11:16:42 +01001512 struct list_head rif_list_node;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001513 struct rhash_head ht_node;
1514 struct mlxsw_sp_neigh_key key;
1515 u16 rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001516 bool connected;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001517 unsigned char ha[ETH_ALEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001518 struct list_head nexthop_list; /* list of nexthops using
1519 * this neigh entry
1520 */
Yotam Gigib2157142016-07-05 11:27:51 +02001521 struct list_head nexthop_neighs_list_node;
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001522 unsigned int counter_index;
1523 bool counter_valid;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001524};
1525
1526static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
1527 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
1528 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
1529 .key_len = sizeof(struct mlxsw_sp_neigh_key),
1530};
1531
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001532struct mlxsw_sp_neigh_entry *
1533mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
1534 struct mlxsw_sp_neigh_entry *neigh_entry)
1535{
1536 if (!neigh_entry) {
1537 if (list_empty(&rif->neigh_list))
1538 return NULL;
1539 else
1540 return list_first_entry(&rif->neigh_list,
1541 typeof(*neigh_entry),
1542 rif_list_node);
1543 }
Arkadi Sharshevskyec2437f2017-09-25 10:32:24 +02001544 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
Arkadi Sharshevskyf17cc842017-08-24 08:40:04 +02001545 return NULL;
1546 return list_next_entry(neigh_entry, rif_list_node);
1547}
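
/* A typical walk over a RIF's neighbour list using the iterator above might
 * look roughly as follows (a sketch, not taken from an in-tree caller):
 *
 *	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;
 *
 *	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)))
 *		... inspect neigh_entry ...
 */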
1548
1549int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
1550{
1551 return neigh_entry->key.n->tbl->family;
1552}
1553
1554unsigned char *
1555mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
1556{
1557 return neigh_entry->ha;
1558}
1559
1560u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1561{
1562 struct neighbour *n;
1563
1564 n = neigh_entry->key.n;
1565 return ntohl(*((__be32 *) n->primary_key));
1566}
1567
Arkadi Sharshevsky02507682017-08-31 17:59:15 +02001568struct in6_addr *
1569mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
1570{
1571 struct neighbour *n;
1572
1573 n = neigh_entry->key.n;
1574 return (struct in6_addr *) &n->primary_key;
1575}
1576
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001577int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
1578 struct mlxsw_sp_neigh_entry *neigh_entry,
1579 u64 *p_counter)
1580{
1581 if (!neigh_entry->counter_valid)
1582 return -EINVAL;
1583
1584 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
1585 p_counter, NULL);
1586}
1587
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001588static struct mlxsw_sp_neigh_entry *
1589mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
1590 u16 rif)
1591{
1592 struct mlxsw_sp_neigh_entry *neigh_entry;
1593
1594 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
1595 if (!neigh_entry)
1596 return NULL;
1597
1598 neigh_entry->key.n = n;
1599 neigh_entry->rif = rif;
1600 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
1601
1602 return neigh_entry;
1603}
1604
1605static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
1606{
1607 kfree(neigh_entry);
1608}
1609
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001610static int
1611mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
1612 struct mlxsw_sp_neigh_entry *neigh_entry)
1613{
Ido Schimmel9011b672017-05-16 19:38:25 +02001614 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001615 &neigh_entry->ht_node,
1616 mlxsw_sp_neigh_ht_params);
1617}
1618
1619static void
1620mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
1621 struct mlxsw_sp_neigh_entry *neigh_entry)
1622{
Ido Schimmel9011b672017-05-16 19:38:25 +02001623 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001624 &neigh_entry->ht_node,
1625 mlxsw_sp_neigh_ht_params);
1626}
1627
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001628static bool
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001629mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
1630 struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001631{
1632 struct devlink *devlink;
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001633 const char *table_name;
1634
1635 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
1636 case AF_INET:
1637 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
1638 break;
1639 case AF_INET6:
1640 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
1641 break;
1642 default:
1643 WARN_ON(1);
1644 return false;
1645 }
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001646
1647 devlink = priv_to_devlink(mlxsw_sp->core);
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001648 return devlink_dpipe_table_counter_enabled(devlink, table_name);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001649}
1650
1651static void
1652mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
1653 struct mlxsw_sp_neigh_entry *neigh_entry)
1654{
Arkadi Sharshevsky1ed55742017-08-31 17:59:18 +02001655 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001656 return;
1657
1658 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
1659 return;
1660
1661 neigh_entry->counter_valid = true;
1662}
1663
1664static void
1665mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
1666 struct mlxsw_sp_neigh_entry *neigh_entry)
1667{
1668 if (!neigh_entry->counter_valid)
1669 return;
1670 mlxsw_sp_flow_counter_free(mlxsw_sp,
1671 neigh_entry->counter_index);
1672 neigh_entry->counter_valid = false;
1673}
1674
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001675static struct mlxsw_sp_neigh_entry *
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001676mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001677{
1678 struct mlxsw_sp_neigh_entry *neigh_entry;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001679 struct mlxsw_sp_rif *rif;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001680 int err;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001681
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001682 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
1683 if (!rif)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001684 return ERR_PTR(-EINVAL);
1685
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001686 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001687 if (!neigh_entry)
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001688 return ERR_PTR(-ENOMEM);
1689
1690 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
1691 if (err)
1692 goto err_neigh_entry_insert;
1693
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001694 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001695 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001696
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001697 return neigh_entry;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001698
1699err_neigh_entry_insert:
1700 mlxsw_sp_neigh_entry_free(neigh_entry);
1701 return ERR_PTR(err);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001702}
1703
1704static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001705mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1706 struct mlxsw_sp_neigh_entry *neigh_entry)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001707{
Ido Schimmel9665b742017-02-08 11:16:42 +01001708 list_del(&neigh_entry->rif_list_node);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02001709 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001710 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
1711 mlxsw_sp_neigh_entry_free(neigh_entry);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001712}
1713
1714static struct mlxsw_sp_neigh_entry *
Jiri Pirko33b13412016-11-10 12:31:04 +01001715mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001716{
Jiri Pirko33b13412016-11-10 12:31:04 +01001717 struct mlxsw_sp_neigh_key key;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001718
Jiri Pirko33b13412016-11-10 12:31:04 +01001719 key.n = n;
Ido Schimmel9011b672017-05-16 19:38:25 +02001720 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
Jiri Pirko6cf3c972016-07-05 11:27:39 +02001721 &key, mlxsw_sp_neigh_ht_params);
1722}
1723
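/* The polling interval tracks the shorter of the IPv4 ARP and, when IPv6 is
 * enabled, the IPv6 ND DELAY_PROBE_TIME of the default table parameters,
 * converted to milliseconds.
 */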
Yotam Gigic723c7352016-07-05 11:27:43 +02001724static void
1725mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
1726{
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001727 unsigned long interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02001728
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001729#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevskya6c9b5d2017-07-18 10:10:18 +02001730 interval = min_t(unsigned long,
1731 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
1732 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001733#else
1734 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
1735#endif
Ido Schimmel9011b672017-05-16 19:38:25 +02001736 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
Yotam Gigic723c7352016-07-05 11:27:43 +02001737}
1738
1739static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1740 char *rauhtd_pl,
1741 int ent_index)
1742{
1743 struct net_device *dev;
1744 struct neighbour *n;
1745 __be32 dipn;
1746 u32 dip;
1747 u16 rif;
1748
1749 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
1750
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001751 if (!mlxsw_sp->router->rifs[rif]) {
Yotam Gigic723c7352016-07-05 11:27:43 +02001752 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1753 return;
1754 }
1755
1756 dipn = htonl(dip);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02001757 dev = mlxsw_sp->router->rifs[rif]->dev;
Yotam Gigic723c7352016-07-05 11:27:43 +02001758 n = neigh_lookup(&arp_tbl, &dipn, dev);
1759 if (!n) {
1760 netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
1761 &dip);
1762 return;
1763 }
1764
1765 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
1766 neigh_event_send(n, NULL);
1767 neigh_release(n);
1768}
1769
Ido Schimmeldf9a21f2017-08-15 09:10:33 +02001770#if IS_ENABLED(CONFIG_IPV6)
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001771static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1772 char *rauhtd_pl,
1773 int rec_index)
1774{
1775 struct net_device *dev;
1776 struct neighbour *n;
1777 struct in6_addr dip;
1778 u16 rif;
1779
1780 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
1781 (char *) &dip);
1782
1783 if (!mlxsw_sp->router->rifs[rif]) {
1784 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
1785 return;
1786 }
1787
1788 dev = mlxsw_sp->router->rifs[rif]->dev;
1789 n = neigh_lookup(&nd_tbl, &dip, dev);
1790 if (!n) {
1791 netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
1792 &dip);
1793 return;
1794 }
1795
1796 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
1797 neigh_event_send(n, NULL);
1798 neigh_release(n);
1799}
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02001800#else
1801static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1802 char *rauhtd_pl,
1803 int rec_index)
1804{
1805}
1806#endif
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001807
Yotam Gigic723c7352016-07-05 11:27:43 +02001808static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
1809 char *rauhtd_pl,
1810 int rec_index)
1811{
1812 u8 num_entries;
1813 int i;
1814
1815 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1816 rec_index);
1817 /* Hardware starts counting at 0, so add 1. */
1818 num_entries++;
1819
1820 /* Each record consists of several neighbour entries. */
1821 for (i = 0; i < num_entries; i++) {
1822 int ent_index;
1823
1824 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
1825 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
1826 ent_index);
1827 }
1828
1829}
1830
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001831static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
1832 char *rauhtd_pl,
1833 int rec_index)
1834{
1835 /* One record contains one entry. */
1836 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
1837 rec_index);
1838}
1839
Yotam Gigic723c7352016-07-05 11:27:43 +02001840static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
1841 char *rauhtd_pl, int rec_index)
1842{
1843 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
1844 case MLXSW_REG_RAUHTD_TYPE_IPV4:
1845 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
1846 rec_index);
1847 break;
1848 case MLXSW_REG_RAUHTD_TYPE_IPV6:
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001849 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
1850 rec_index);
Yotam Gigic723c7352016-07-05 11:27:43 +02001851 break;
1852 }
1853}
1854
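/* The dump is considered "full", and therefore worth another RAUHTD query,
 * when the response carries the maximum number of records and the last record
 * is either an IPv6 record or an IPv4 record with all of its entries
 * populated.
 */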
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001855static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
1856{
1857 u8 num_rec, last_rec_index, num_entries;
1858
1859 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1860 last_rec_index = num_rec - 1;
1861
1862 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
1863 return false;
1864 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
1865 MLXSW_REG_RAUHTD_TYPE_IPV6)
1866 return true;
1867
1868 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
1869 last_rec_index);
1870 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
1871 return true;
1872 return false;
1873}
1874
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001875static int
1876__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
1877 char *rauhtd_pl,
1878 enum mlxsw_reg_rauhtd_type type)
Yotam Gigic723c7352016-07-05 11:27:43 +02001879{
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001880 int i, num_rec;
1881 int err;
Yotam Gigic723c7352016-07-05 11:27:43 +02001882
1883 /* Make sure the neighbour's netdev isn't removed in the
1884 * process.
1885 */
1886 rtnl_lock();
1887 do {
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001888 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
Yotam Gigic723c7352016-07-05 11:27:43 +02001889 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
1890 rauhtd_pl);
1891 if (err) {
Petr Machata7ff176f2017-10-02 12:21:57 +02001892 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
Yotam Gigic723c7352016-07-05 11:27:43 +02001893 break;
1894 }
1895 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
1896 for (i = 0; i < num_rec; i++)
1897 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
1898 i);
Arkadi Sharshevsky42cdb332016-11-11 16:34:26 +01001899 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
Yotam Gigic723c7352016-07-05 11:27:43 +02001900 rtnl_unlock();
1901
Arkadi Sharshevsky60f040c2017-07-18 10:10:17 +02001902 return err;
1903}
1904
1905static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
1906{
1907 enum mlxsw_reg_rauhtd_type type;
1908 char *rauhtd_pl;
1909 int err;
1910
1911 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
1912 if (!rauhtd_pl)
1913 return -ENOMEM;
1914
1915 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
1916 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1917 if (err)
1918 goto out;
1919
1920 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
1921 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
1922out:
Yotam Gigic723c7352016-07-05 11:27:43 +02001923 kfree(rauhtd_pl);
Yotam Gigib2157142016-07-05 11:27:51 +02001924 return err;
1925}
1926
1927static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
1928{
1929 struct mlxsw_sp_neigh_entry *neigh_entry;
1930
 1931	/* Take the RTNL mutex here to prevent the lists from changing */
1932 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001933 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001934 nexthop_neighs_list_node)
Yotam Gigib2157142016-07-05 11:27:51 +02001935		/* If this neigh has nexthops, make the kernel think this neigh
1936 * is active regardless of the traffic.
1937 */
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001938 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigib2157142016-07-05 11:27:51 +02001939 rtnl_unlock();
1940}
1941
1942static void
1943mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
1944{
Ido Schimmel9011b672017-05-16 19:38:25 +02001945 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
Yotam Gigib2157142016-07-05 11:27:51 +02001946
Ido Schimmel9011b672017-05-16 19:38:25 +02001947 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigib2157142016-07-05 11:27:51 +02001948 msecs_to_jiffies(interval));
1949}
1950
1951static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
1952{
Ido Schimmel9011b672017-05-16 19:38:25 +02001953 struct mlxsw_sp_router *router;
Yotam Gigib2157142016-07-05 11:27:51 +02001954 int err;
1955
Ido Schimmel9011b672017-05-16 19:38:25 +02001956 router = container_of(work, struct mlxsw_sp_router,
1957 neighs_update.dw.work);
1958 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001959 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02001960 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
Yotam Gigib2157142016-07-05 11:27:51 +02001961
Ido Schimmel9011b672017-05-16 19:38:25 +02001962 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
Yotam Gigib2157142016-07-05 11:27:51 +02001963
Ido Schimmel9011b672017-05-16 19:38:25 +02001964 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
Yotam Gigic723c7352016-07-05 11:27:43 +02001965}
1966
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001967static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
1968{
1969 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmel9011b672017-05-16 19:38:25 +02001970 struct mlxsw_sp_router *router;
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001971
Ido Schimmel9011b672017-05-16 19:38:25 +02001972 router = container_of(work, struct mlxsw_sp_router,
1973 nexthop_probe_dw.work);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001974	/* Iterate over the nexthop neighbours, find those that are unresolved
 1975	 * and send ARP on them. This solves the chicken-and-egg problem where
 1976	 * a nexthop would not get offloaded until its neighbour is resolved,
 1977	 * but the neighbour would never get resolved as long as traffic is
 1978	 * flowing in HW using a different nexthop.
 1979	 *
 1980	 * Take the RTNL mutex here to prevent the lists from changing.
 1981	 */
1982 rtnl_lock();
Ido Schimmel9011b672017-05-16 19:38:25 +02001983 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
Ido Schimmel8a0b7272017-02-06 16:20:15 +01001984 nexthop_neighs_list_node)
Ido Schimmel01b1aa32017-02-06 16:20:16 +01001985 if (!neigh_entry->connected)
Jiri Pirko33b13412016-11-10 12:31:04 +01001986 neigh_event_send(neigh_entry->key.n, NULL);
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001987 rtnl_unlock();
1988
Ido Schimmel9011b672017-05-16 19:38:25 +02001989 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02001990 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
1991}
1992
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001993static void
1994mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1995 struct mlxsw_sp_neigh_entry *neigh_entry,
1996 bool removing);
1997
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001998static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02001999{
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002000 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2001 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2002}
2003
2004static void
2005mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2006 struct mlxsw_sp_neigh_entry *neigh_entry,
2007 enum mlxsw_reg_rauht_op op)
2008{
Jiri Pirko33b13412016-11-10 12:31:04 +01002009 struct neighbour *n = neigh_entry->key.n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002010 u32 dip = ntohl(*((__be32 *) n->primary_key));
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002011 char rauht_pl[MLXSW_REG_RAUHT_LEN];
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002012
2013 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2014 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002015 if (neigh_entry->counter_valid)
2016 mlxsw_reg_rauht_pack_counter(rauht_pl,
2017 neigh_entry->counter_index);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002018 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2019}
2020
2021static void
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002022mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2023 struct mlxsw_sp_neigh_entry *neigh_entry,
2024 enum mlxsw_reg_rauht_op op)
2025{
2026 struct neighbour *n = neigh_entry->key.n;
2027 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2028 const char *dip = n->primary_key;
2029
2030 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2031 dip);
Arkadi Sharshevsky7cfcbc72017-08-24 08:40:08 +02002032 if (neigh_entry->counter_valid)
2033 mlxsw_reg_rauht_pack_counter(rauht_pl,
2034 neigh_entry->counter_index);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002035 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2036}
2037
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002038bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002039{
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002040 struct neighbour *n = neigh_entry->key.n;
2041
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002042 /* Packets with a link-local destination address are trapped
2043 * after LPM lookup and never reach the neighbour table, so
2044 * there is no need to program such neighbours to the device.
2045 */
2046 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2047 IPV6_ADDR_LINKLOCAL)
2048 return true;
2049 return false;
2050}
2051
2052static void
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002053mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2054 struct mlxsw_sp_neigh_entry *neigh_entry,
2055 bool adding)
2056{
2057 if (!adding && !neigh_entry->connected)
2058 return;
2059 neigh_entry->connected = adding;
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002060 if (neigh_entry->key.n->tbl->family == AF_INET) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002061 mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2062 mlxsw_sp_rauht_op(adding));
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002063 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
Arkadi Sharshevsky1d1056d82017-08-31 17:59:13 +02002064 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002065 return;
2066 mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2067 mlxsw_sp_rauht_op(adding));
2068 } else {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002069 WARN_ON_ONCE(1);
Arkadi Sharshevskyd5eb89c2017-07-18 10:10:15 +02002070 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002071}
2072
Arkadi Sharshevskya481d712017-08-24 08:40:10 +02002073void
2074mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2075 struct mlxsw_sp_neigh_entry *neigh_entry,
2076 bool adding)
2077{
2078 if (adding)
2079 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2080 else
2081 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2082 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2083}
2084
Ido Schimmelceb88812017-11-02 17:14:07 +01002085struct mlxsw_sp_netevent_work {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002086 struct work_struct work;
2087 struct mlxsw_sp *mlxsw_sp;
2088 struct neighbour *n;
2089};
2090
2091static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2092{
Ido Schimmelceb88812017-11-02 17:14:07 +01002093 struct mlxsw_sp_netevent_work *net_work =
2094 container_of(work, struct mlxsw_sp_netevent_work, work);
2095 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002096 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmelceb88812017-11-02 17:14:07 +01002097 struct neighbour *n = net_work->n;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002098 unsigned char ha[ETH_ALEN];
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002099 bool entry_connected;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002100 u8 nud_state, dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002101
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002102 /* If these parameters are changed after we release the lock,
2103 * then we are guaranteed to receive another event letting us
2104 * know about it.
2105 */
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002106 read_lock_bh(&n->lock);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002107 memcpy(ha, n->ha, ETH_ALEN);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002108 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01002109 dead = n->dead;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002110 read_unlock_bh(&n->lock);
2111
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002112 rtnl_lock();
Ido Schimmel93a87e52016-12-23 09:32:49 +01002113 entry_connected = nud_state & NUD_VALID && !dead;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002114 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2115 if (!entry_connected && !neigh_entry)
2116 goto out;
2117 if (!neigh_entry) {
2118 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2119 if (IS_ERR(neigh_entry))
2120 goto out;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002121 }
2122
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002123 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2124 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2125 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
2126
2127 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2128 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2129
2130out:
2131 rtnl_unlock();
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002132 neigh_release(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002133 kfree(net_work);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002134}
2135
Ido Schimmel28678f02017-11-02 17:14:10 +01002136static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2137
2138static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2139{
2140 struct mlxsw_sp_netevent_work *net_work =
2141 container_of(work, struct mlxsw_sp_netevent_work, work);
2142 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2143
2144 mlxsw_sp_mp_hash_init(mlxsw_sp);
2145 kfree(net_work);
2146}
2147
2148static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
Ido Schimmel48fac882017-11-02 17:14:06 +01002149 unsigned long event, void *ptr)
Yotam Gigic723c7352016-07-05 11:27:43 +02002150{
Ido Schimmelceb88812017-11-02 17:14:07 +01002151 struct mlxsw_sp_netevent_work *net_work;
Yotam Gigic723c7352016-07-05 11:27:43 +02002152 struct mlxsw_sp_port *mlxsw_sp_port;
Ido Schimmel28678f02017-11-02 17:14:10 +01002153 struct mlxsw_sp_router *router;
Yotam Gigic723c7352016-07-05 11:27:43 +02002154 struct mlxsw_sp *mlxsw_sp;
2155 unsigned long interval;
2156 struct neigh_parms *p;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002157 struct neighbour *n;
Ido Schimmel28678f02017-11-02 17:14:10 +01002158 struct net *net;
Yotam Gigic723c7352016-07-05 11:27:43 +02002159
2160 switch (event) {
2161 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2162 p = ptr;
2163
2164 /* We don't care about changes in the default table. */
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002165 if (!p->dev || (p->tbl->family != AF_INET &&
2166 p->tbl->family != AF_INET6))
Yotam Gigic723c7352016-07-05 11:27:43 +02002167 return NOTIFY_DONE;
2168
2169 /* We are in atomic context and can't take RTNL mutex,
2170 * so use RCU variant to walk the device chain.
2171 */
2172 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2173 if (!mlxsw_sp_port)
2174 return NOTIFY_DONE;
2175
2176 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2177 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
Ido Schimmel9011b672017-05-16 19:38:25 +02002178 mlxsw_sp->router->neighs_update.interval = interval;
Yotam Gigic723c7352016-07-05 11:27:43 +02002179
2180 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2181 break;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002182 case NETEVENT_NEIGH_UPDATE:
2183 n = ptr;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002184
Ido Schimmelb5f3e0d2017-07-24 09:56:00 +02002185 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002186 return NOTIFY_DONE;
2187
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002188 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002189 if (!mlxsw_sp_port)
2190 return NOTIFY_DONE;
2191
Ido Schimmelceb88812017-11-02 17:14:07 +01002192 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2193 if (!net_work) {
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002194 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002195 return NOTIFY_BAD;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002196 }
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002197
Ido Schimmelceb88812017-11-02 17:14:07 +01002198 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2199 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2200 net_work->n = n;
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002201
 2202		/* Take a reference to ensure the neighbour won't be
 2203		 * destroyed until we drop the reference in the
 2204		 * work item.
 2205		 */
2206 neigh_clone(n);
Ido Schimmelceb88812017-11-02 17:14:07 +01002207 mlxsw_core_schedule_work(&net_work->work);
Ido Schimmel5c8802f2017-02-06 16:20:13 +01002208 mlxsw_sp_port_dev_put(mlxsw_sp_port);
Yotam Gigia6bf9e92016-07-05 11:27:44 +02002209 break;
Ido Schimmel28678f02017-11-02 17:14:10 +01002210 case NETEVENT_MULTIPATH_HASH_UPDATE:
2211 net = ptr;
2212
2213 if (!net_eq(net, &init_net))
2214 return NOTIFY_DONE;
2215
2216 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2217 if (!net_work)
2218 return NOTIFY_BAD;
2219
2220 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2221 INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
2222 net_work->mlxsw_sp = router->mlxsw_sp;
2223 mlxsw_core_schedule_work(&net_work->work);
2224 break;
Yotam Gigic723c7352016-07-05 11:27:43 +02002225 }
2226
2227 return NOTIFY_DONE;
2228}
2229
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002230static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2231{
Yotam Gigic723c7352016-07-05 11:27:43 +02002232 int err;
2233
Ido Schimmel9011b672017-05-16 19:38:25 +02002234 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
Yotam Gigic723c7352016-07-05 11:27:43 +02002235 &mlxsw_sp_neigh_ht_params);
2236 if (err)
2237 return err;
2238
2239 /* Initialize the polling interval according to the default
2240 * table.
2241 */
2242 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2243
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002244	/* Create the delayed works for neighbour activity update and for
 2245	 * probing unresolved nexthops.
 2246	 */
Ido Schimmel9011b672017-05-16 19:38:25 +02002245 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
Yotam Gigic723c7352016-07-05 11:27:43 +02002246 mlxsw_sp_router_neighs_update_work);
Ido Schimmel9011b672017-05-16 19:38:25 +02002247 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
Yotam Gigi0b2361d2016-07-05 11:27:52 +02002248 mlxsw_sp_router_probe_unresolved_nexthops);
Ido Schimmel9011b672017-05-16 19:38:25 +02002249 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2250 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
Yotam Gigic723c7352016-07-05 11:27:43 +02002251 return 0;
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002252}
2253
2254static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2255{
Ido Schimmel9011b672017-05-16 19:38:25 +02002256 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2257 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2258 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
Jiri Pirko6cf3c972016-07-05 11:27:39 +02002259}
2260
Ido Schimmel9665b742017-02-08 11:16:42 +01002261static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002262 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01002263{
2264 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2265
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002266 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002267 rif_list_node) {
2268 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
Ido Schimmel9665b742017-02-08 11:16:42 +01002269 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
Ido Schimmel4a3c67a2017-07-21 20:31:38 +02002270 }
Ido Schimmel9665b742017-02-08 11:16:42 +01002271}
2272
Petr Machata35225e42017-09-02 23:49:22 +02002273enum mlxsw_sp_nexthop_type {
2274 MLXSW_SP_NEXTHOP_TYPE_ETH,
Petr Machata1012b9a2017-09-02 23:49:23 +02002275 MLXSW_SP_NEXTHOP_TYPE_IPIP,
Petr Machata35225e42017-09-02 23:49:22 +02002276};
2277
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002278struct mlxsw_sp_nexthop_key {
2279 struct fib_nh *fib_nh;
2280};
2281
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002282struct mlxsw_sp_nexthop {
2283 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01002284 struct list_head rif_list_node;
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02002285 struct list_head router_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002286 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2287 * this belongs to
2288 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002289 struct rhash_head ht_node;
2290 struct mlxsw_sp_nexthop_key key;
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002291 unsigned char gw_addr[sizeof(struct in6_addr)];
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002292 int ifindex;
Ido Schimmel408bd942017-10-22 23:11:46 +02002293 int nh_weight;
Ido Schimmeleb789982017-10-22 23:11:48 +02002294 int norm_nh_weight;
2295 int num_adj_entries;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002296 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002297 u8 should_offload:1, /* set indicates this neigh is connected and
2298 * should be put to KVD linear area of this group.
2299 */
2300 offloaded:1, /* set in case the neigh is actually put into
2301 * KVD linear area of this group.
2302 */
2303 update:1; /* set indicates that MAC of this neigh should be
2304 * updated in HW
2305 */
Petr Machata35225e42017-09-02 23:49:22 +02002306 enum mlxsw_sp_nexthop_type type;
2307 union {
2308 struct mlxsw_sp_neigh_entry *neigh_entry;
Petr Machata1012b9a2017-09-02 23:49:23 +02002309 struct mlxsw_sp_ipip_entry *ipip_entry;
Petr Machata35225e42017-09-02 23:49:22 +02002310 };
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002311 unsigned int counter_index;
2312 bool counter_valid;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002313};
2314
2315struct mlxsw_sp_nexthop_group {
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002316 void *priv;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002317 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002318 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02002319 struct neigh_table *neigh_tbl;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002320 u8 adj_index_valid:1,
2321 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002322 u32 adj_index;
2323 u16 ecmp_size;
2324 u16 count;
Ido Schimmeleb789982017-10-22 23:11:48 +02002325 int sum_norm_weight;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002326 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002327#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002328};
2329
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002330void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2331 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002332{
2333 struct devlink *devlink;
2334
2335 devlink = priv_to_devlink(mlxsw_sp->core);
2336 if (!devlink_dpipe_table_counter_enabled(devlink,
2337 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2338 return;
2339
2340 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2341 return;
2342
2343 nh->counter_valid = true;
2344}
2345
Arkadi Sharshevsky427e6522017-09-25 10:32:30 +02002346void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2347 struct mlxsw_sp_nexthop *nh)
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002348{
2349 if (!nh->counter_valid)
2350 return;
2351 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2352 nh->counter_valid = false;
2353}
2354
2355int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2356 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2357{
2358 if (!nh->counter_valid)
2359 return -EINVAL;
2360
2361 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2362 p_counter, NULL);
2363}
2364
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002365struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2366 struct mlxsw_sp_nexthop *nh)
2367{
2368 if (!nh) {
2369 if (list_empty(&router->nexthop_list))
2370 return NULL;
2371 else
2372 return list_first_entry(&router->nexthop_list,
2373 typeof(*nh), router_list_node);
2374 }
2375 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2376 return NULL;
2377 return list_next_entry(nh, router_list_node);
2378}
2379
2380bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2381{
2382 return nh->offloaded;
2383}
2384
2385unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2386{
2387 if (!nh->offloaded)
2388 return NULL;
2389 return nh->neigh_entry->ha;
2390}
2391
2392int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002393 u32 *p_adj_size, u32 *p_adj_hash_index)
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002394{
2395 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2396 u32 adj_hash_index = 0;
2397 int i;
2398
2399 if (!nh->offloaded || !nh_grp->adj_index_valid)
2400 return -EINVAL;
2401
2402 *p_adj_index = nh_grp->adj_index;
Ido Schimmele69cd9d2017-10-22 23:11:43 +02002403 *p_adj_size = nh_grp->ecmp_size;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002404
2405 for (i = 0; i < nh_grp->count; i++) {
2406 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2407
2408 if (nh_iter == nh)
2409 break;
2410 if (nh_iter->offloaded)
Ido Schimmeleb789982017-10-22 23:11:48 +02002411 adj_hash_index += nh_iter->num_adj_entries;
Arkadi Sharshevskyc556cd22017-09-25 10:32:25 +02002412 }
2413
2414 *p_adj_hash_index = adj_hash_index;
2415 return 0;
2416}
2417
2418struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2419{
2420 return nh->rif;
2421}
2422
2423bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2424{
2425 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2426 int i;
2427
2428 for (i = 0; i < nh_grp->count; i++) {
2429 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2430
2431 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2432 return true;
2433 }
2434 return false;
2435}
2436
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002437static struct fib_info *
2438mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2439{
2440 return nh_grp->priv;
2441}
2442
2443struct mlxsw_sp_nexthop_group_cmp_arg {
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002444 enum mlxsw_sp_l3proto proto;
2445 union {
2446 struct fib_info *fi;
2447 struct mlxsw_sp_fib6_entry *fib6_entry;
2448 };
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002449};
2450
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002451static bool
2452mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
2453 const struct in6_addr *gw, int ifindex)
2454{
2455 int i;
2456
2457 for (i = 0; i < nh_grp->count; i++) {
2458 const struct mlxsw_sp_nexthop *nh;
2459
2460 nh = &nh_grp->nexthops[i];
2461 if (nh->ifindex == ifindex &&
2462 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
2463 return true;
2464 }
2465
2466 return false;
2467}
2468
2469static bool
2470mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
2471 const struct mlxsw_sp_fib6_entry *fib6_entry)
2472{
2473 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2474
2475 if (nh_grp->count != fib6_entry->nrt6)
2476 return false;
2477
2478 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2479 struct in6_addr *gw;
2480 int ifindex;
2481
2482 ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex;
2483 gw = &mlxsw_sp_rt6->rt->rt6i_gateway;
2484 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex))
2485 return false;
2486 }
2487
2488 return true;
2489}
2490
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002491static int
2492mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
2493{
2494 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
2495 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
2496
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002497 switch (cmp_arg->proto) {
2498 case MLXSW_SP_L3_PROTO_IPV4:
2499 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
2500 case MLXSW_SP_L3_PROTO_IPV6:
2501 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
2502 cmp_arg->fib6_entry);
2503 default:
2504 WARN_ON(1);
2505 return 1;
2506 }
2507}
2508
2509static int
2510mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
2511{
2512 return nh_grp->neigh_tbl->family;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002513}
2514
2515static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
2516{
2517 const struct mlxsw_sp_nexthop_group *nh_grp = data;
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002518 const struct mlxsw_sp_nexthop *nh;
2519 struct fib_info *fi;
2520 unsigned int val;
2521 int i;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002522
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002523 switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
2524 case AF_INET:
2525 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
2526 return jhash(&fi, sizeof(fi), seed);
2527 case AF_INET6:
2528 val = nh_grp->count;
2529 for (i = 0; i < nh_grp->count; i++) {
2530 nh = &nh_grp->nexthops[i];
2531 val ^= nh->ifindex;
2532 }
2533 return jhash(&val, sizeof(val), seed);
2534 default:
2535 WARN_ON(1);
2536 return 0;
2537 }
2538}
2539
2540static u32
2541mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
2542{
2543 unsigned int val = fib6_entry->nrt6;
2544 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2545 struct net_device *dev;
2546
2547 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
2548 dev = mlxsw_sp_rt6->rt->dst.dev;
2549 val ^= dev->ifindex;
2550 }
2551
2552 return jhash(&val, sizeof(val), seed);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002553}
2554
2555static u32
2556mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
2557{
2558 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
2559
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002560 switch (cmp_arg->proto) {
2561 case MLXSW_SP_L3_PROTO_IPV4:
2562 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
2563 case MLXSW_SP_L3_PROTO_IPV6:
2564 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
2565 default:
2566 WARN_ON(1);
2567 return 0;
2568 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002569}
2570
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002571static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002572 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002573 .hashfn = mlxsw_sp_nexthop_group_hash,
2574 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
2575 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002576};
2577
2578static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
2579 struct mlxsw_sp_nexthop_group *nh_grp)
2580{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002581 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2582 !nh_grp->gateway)
2583 return 0;
2584
Ido Schimmel9011b672017-05-16 19:38:25 +02002585 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002586 &nh_grp->ht_node,
2587 mlxsw_sp_nexthop_group_ht_params);
2588}
2589
2590static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
2591 struct mlxsw_sp_nexthop_group *nh_grp)
2592{
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002593 if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
2594 !nh_grp->gateway)
2595 return;
2596
Ido Schimmel9011b672017-05-16 19:38:25 +02002597 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002598 &nh_grp->ht_node,
2599 mlxsw_sp_nexthop_group_ht_params);
2600}
2601
2602static struct mlxsw_sp_nexthop_group *
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002603mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
2604 struct fib_info *fi)
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002605{
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002606 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2607
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002608 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02002609 cmp_arg.fi = fi;
2610 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2611 &cmp_arg,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01002612 mlxsw_sp_nexthop_group_ht_params);
2613}
2614
Arkadi Sharshevskye6f3b372017-08-14 21:09:20 +02002615static struct mlxsw_sp_nexthop_group *
2616mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
2617 struct mlxsw_sp_fib6_entry *fib6_entry)
2618{
2619 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
2620
2621 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
2622 cmp_arg.fib6_entry = fib6_entry;
2623 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
2624 &cmp_arg,
2625 mlxsw_sp_nexthop_group_ht_params);
2626}
2627
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002628static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
2629 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
2630 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
2631 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
2632};
2633
2634static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
2635 struct mlxsw_sp_nexthop *nh)
2636{
Ido Schimmel9011b672017-05-16 19:38:25 +02002637 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002638 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
2639}
2640
2641static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
2642 struct mlxsw_sp_nexthop *nh)
2643{
Ido Schimmel9011b672017-05-16 19:38:25 +02002644 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01002645 mlxsw_sp_nexthop_ht_params);
2646}
2647
Ido Schimmelad178c82017-02-08 11:16:40 +01002648static struct mlxsw_sp_nexthop *
2649mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
2650 struct mlxsw_sp_nexthop_key key)
2651{
Ido Schimmel9011b672017-05-16 19:38:25 +02002652 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01002653 mlxsw_sp_nexthop_ht_params);
2654}
2655
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002656static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002657 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002658 u32 adj_index, u16 ecmp_size,
2659 u32 new_adj_index,
2660 u16 new_ecmp_size)
2661{
2662 char raleu_pl[MLXSW_REG_RALEU_LEN];
2663
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002664 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002665 (enum mlxsw_reg_ralxx_protocol) fib->proto,
2666 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002667 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002668 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
2669}
2670
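/* When a nexthop group's adjacency block moves, every FIB entry that uses the
 * group must be repointed. The RALEU register does this in bulk per virtual
 * router, so one update is issued for each FIB encountered while walking the
 * group's fib_list (consecutive entries sharing a FIB are skipped).
 */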
2671static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
2672 struct mlxsw_sp_nexthop_group *nh_grp,
2673 u32 old_adj_index, u16 old_ecmp_size)
2674{
2675 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002676 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002677 int err;
2678
2679 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01002680 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002681 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002682 fib = fib_entry->fib_node->fib;
2683 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002684 old_adj_index,
2685 old_ecmp_size,
2686 nh_grp->adj_index,
2687 nh_grp->ecmp_size);
2688 if (err)
2689 return err;
2690 }
2691 return 0;
2692}
2693
Ido Schimmeleb789982017-10-22 23:11:48 +02002694static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2695 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002696{
2697 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
2698 char ratr_pl[MLXSW_REG_RATR_LEN];
2699
2700 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
Petr Machata89e41982017-09-02 23:49:15 +02002701 true, MLXSW_REG_RATR_TYPE_ETHERNET,
2702 adj_index, neigh_entry->rif);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002703 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002704 if (nh->counter_valid)
2705 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
2706 else
2707 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
2708
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002709 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
2710}
2711
Ido Schimmeleb789982017-10-22 23:11:48 +02002712int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2713 struct mlxsw_sp_nexthop *nh)
2714{
2715 int i;
2716
2717 for (i = 0; i < nh->num_adj_entries; i++) {
2718 int err;
2719
2720 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
2721 if (err)
2722 return err;
2723 }
2724
2725 return 0;
2726}
2727
2728static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2729 u32 adj_index,
2730 struct mlxsw_sp_nexthop *nh)
Petr Machata1012b9a2017-09-02 23:49:23 +02002731{
2732 const struct mlxsw_sp_ipip_ops *ipip_ops;
2733
2734 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
2735 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
2736}
2737
Ido Schimmeleb789982017-10-22 23:11:48 +02002738static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
2739 u32 adj_index,
2740 struct mlxsw_sp_nexthop *nh)
2741{
2742 int i;
2743
2744 for (i = 0; i < nh->num_adj_entries; i++) {
2745 int err;
2746
2747 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
2748 nh);
2749 if (err)
2750 return err;
2751 }
2752
2753 return 0;
2754}
2755
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002756static int
Petr Machata35225e42017-09-02 23:49:22 +02002757mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
2758 struct mlxsw_sp_nexthop_group *nh_grp,
2759 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002760{
2761 u32 adj_index = nh_grp->adj_index; /* base */
2762 struct mlxsw_sp_nexthop *nh;
2763 int i;
2764 int err;
2765
2766 for (i = 0; i < nh_grp->count; i++) {
2767 nh = &nh_grp->nexthops[i];
2768
2769 if (!nh->should_offload) {
2770 nh->offloaded = 0;
2771 continue;
2772 }
2773
Ido Schimmela59b7e02017-01-23 11:11:42 +01002774 if (nh->update || reallocate) {
Petr Machata35225e42017-09-02 23:49:22 +02002775 switch (nh->type) {
2776 case MLXSW_SP_NEXTHOP_TYPE_ETH:
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02002777 err = mlxsw_sp_nexthop_update
Petr Machata35225e42017-09-02 23:49:22 +02002778 (mlxsw_sp, adj_index, nh);
2779 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02002780 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2781 err = mlxsw_sp_nexthop_ipip_update
2782 (mlxsw_sp, adj_index, nh);
2783 break;
Petr Machata35225e42017-09-02 23:49:22 +02002784 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002785 if (err)
2786 return err;
2787 nh->update = 0;
2788 nh->offloaded = 1;
2789 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002790 adj_index += nh->num_adj_entries;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002791 }
2792 return 0;
2793}
2794
Ido Schimmel1819ae32017-07-21 18:04:28 +02002795static bool
2796mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2797 const struct mlxsw_sp_fib_entry *fib_entry);
2798
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002799static int
2800mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
2801 struct mlxsw_sp_nexthop_group *nh_grp)
2802{
2803 struct mlxsw_sp_fib_entry *fib_entry;
2804 int err;
2805
2806 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel1819ae32017-07-21 18:04:28 +02002807 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2808 fib_entry))
2809 continue;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002810 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2811 if (err)
2812 return err;
2813 }
2814 return 0;
2815}
2816
2817static void
Ido Schimmel77d964e2017-08-02 09:56:05 +02002818mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
2819 enum mlxsw_reg_ralue_op op, int err);
2820
2821static void
2822mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
2823{
2824 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
2825 struct mlxsw_sp_fib_entry *fib_entry;
2826
2827 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
2828 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
2829 fib_entry))
2830 continue;
2831 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2832 }
2833}
2834
Ido Schimmel425a08c2017-10-22 23:11:47 +02002835static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
2836{
2837 /* Valid sizes for an adjacency group are:
2838 * 1-64, 512, 1024, 2048 and 4096.
2839 */
2840 if (*p_adj_grp_size <= 64)
2841 return;
2842 else if (*p_adj_grp_size <= 512)
2843 *p_adj_grp_size = 512;
2844 else if (*p_adj_grp_size <= 1024)
2845 *p_adj_grp_size = 1024;
2846 else if (*p_adj_grp_size <= 2048)
2847 *p_adj_grp_size = 2048;
2848 else
2849 *p_adj_grp_size = 4096;
2850}
2851
2852static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
2853 unsigned int alloc_size)
2854{
2855 if (alloc_size >= 4096)
2856 *p_adj_grp_size = 4096;
2857 else if (alloc_size >= 2048)
2858 *p_adj_grp_size = 2048;
2859 else if (alloc_size >= 1024)
2860 *p_adj_grp_size = 1024;
2861 else if (alloc_size >= 512)
2862 *p_adj_grp_size = 512;
2863}
2864
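/* For example, a request for 100 adjacency entries is first rounded up
 * to 512. If the KVD linear allocator then reports that it would
 * actually provide a 1024-entry block, the group size is bumped to 1024
 * so that the extra entries are used instead of being wasted.
 */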
2865static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
2866 u16 *p_adj_grp_size)
2867{
2868 unsigned int alloc_size;
2869 int err;
2870
2871 /* Round up the requested group size to the next size supported
2872 * by the device and make sure the request can be satisfied.
2873 */
2874 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
2875 err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
2876 &alloc_size);
2877 if (err)
2878 return err;
2879 /* It is possible the allocation results in more allocated
2880 * entries than requested. Try to use as much of them as
2881 * possible.
2882 */
2883 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
2884
2885 return 0;
2886}
2887
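/* Normalize the nexthop weights by their greatest common divisor, so
 * that the smallest possible number of adjacency entries is used. For
 * example, weights 2, 4 and 6 are normalized to 1, 2 and 3, giving a
 * sum of normalized weights of 6. Nexthops that cannot be offloaded do
 * not participate.
 */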
Ido Schimmel77d964e2017-08-02 09:56:05 +02002888static void
Ido Schimmeleb789982017-10-22 23:11:48 +02002889mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
2890{
2891 int i, g = 0, sum_norm_weight = 0;
2892 struct mlxsw_sp_nexthop *nh;
2893
2894 for (i = 0; i < nh_grp->count; i++) {
2895 nh = &nh_grp->nexthops[i];
2896
2897 if (!nh->should_offload)
2898 continue;
2899 if (g > 0)
2900 g = gcd(nh->nh_weight, g);
2901 else
2902 g = nh->nh_weight;
2903 }
2904
2905 for (i = 0; i < nh_grp->count; i++) {
2906 nh = &nh_grp->nexthops[i];
2907
2908 if (!nh->should_offload)
2909 continue;
2910 nh->norm_nh_weight = nh->nh_weight / g;
2911 sum_norm_weight += nh->norm_nh_weight;
2912 }
2913
2914 nh_grp->sum_norm_weight = sum_norm_weight;
2915}
2916
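/* Distribute the group's adjacency entries among the nexthops in
 * proportion to their normalized weights. For example, with normalized
 * weights 1, 2 and 3 (sum 6) and an adjacency group of 6 entries, the
 * nexthops are assigned 1, 2 and 3 entries respectively. When the group
 * was rounded up to a larger size, the same ratio is preserved as
 * closely as the rounding allows.
 */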
2917static void
2918mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
2919{
2920 int total = nh_grp->sum_norm_weight;
2921 u16 ecmp_size = nh_grp->ecmp_size;
2922 int i, weight = 0, lower_bound = 0;
2923
2924 for (i = 0; i < nh_grp->count; i++) {
2925 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
2926 int upper_bound;
2927
2928 if (!nh->should_offload)
2929 continue;
2930 weight += nh->norm_nh_weight;
2931 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
2932 nh->num_adj_entries = upper_bound - lower_bound;
2933 lower_bound = upper_bound;
2934 }
2935}
2936
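/* Re-synchronize a nexthop group with the device. Groups without a
 * gateway only need their FIB entries refreshed. Otherwise, if no
 * nexthop joined or left the offloaded set, the existing adjacency
 * entries are simply rewritten. If the set did change, a new adjacency
 * block is allocated and programmed, and the FIB entries are either
 * updated to point at it (when the group was previously trapping) or
 * switched over via a mass-update of the old range, after which the old
 * block is freed. Any failure along the way falls back to trapping the
 * group's traffic to the CPU.
 */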
2937static void
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002938mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2939 struct mlxsw_sp_nexthop_group *nh_grp)
2940{
Ido Schimmeleb789982017-10-22 23:11:48 +02002941 u16 ecmp_size, old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002942 struct mlxsw_sp_nexthop *nh;
2943 bool offload_change = false;
2944 u32 adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002945 bool old_adj_index_valid;
2946 u32 old_adj_index;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002947 int i;
2948 int err;
2949
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01002950 if (!nh_grp->gateway) {
2951 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
2952 return;
2953 }
2954
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002955 for (i = 0; i < nh_grp->count; i++) {
2956 nh = &nh_grp->nexthops[i];
2957
Petr Machata56b8a9e2017-07-31 09:27:29 +02002958 if (nh->should_offload != nh->offloaded) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002959 offload_change = true;
2960 if (nh->should_offload)
2961 nh->update = 1;
2962 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002963 }
2964 if (!offload_change) {
2965 /* Nothing was added or removed, so no need to reallocate. Just
2966 * update MAC on existing adjacency indexes.
2967 */
Petr Machata35225e42017-09-02 23:49:22 +02002968 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002969 if (err) {
2970 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2971 goto set_trap;
2972 }
2973 return;
2974 }
Ido Schimmeleb789982017-10-22 23:11:48 +02002975 mlxsw_sp_nexthop_group_normalize(nh_grp);
2976 if (!nh_grp->sum_norm_weight)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002977 /* No neigh of this group is connected so we just set
 2978		 * the trap and let everything flow through the kernel.
2979 */
2980 goto set_trap;
2981
Ido Schimmeleb789982017-10-22 23:11:48 +02002982 ecmp_size = nh_grp->sum_norm_weight;
Ido Schimmel425a08c2017-10-22 23:11:47 +02002983 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
2984 if (err)
2985 /* No valid allocation size available. */
2986 goto set_trap;
2987
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01002988 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
2989 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002990 /* We ran out of KVD linear space, just set the
 2991		 * trap and let everything flow through the kernel.
2992 */
2993 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
2994 goto set_trap;
2995 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002996 old_adj_index_valid = nh_grp->adj_index_valid;
2997 old_adj_index = nh_grp->adj_index;
2998 old_ecmp_size = nh_grp->ecmp_size;
2999 nh_grp->adj_index_valid = 1;
3000 nh_grp->adj_index = adj_index;
3001 nh_grp->ecmp_size = ecmp_size;
Ido Schimmeleb789982017-10-22 23:11:48 +02003002 mlxsw_sp_nexthop_group_rebalance(nh_grp);
Petr Machata35225e42017-09-02 23:49:22 +02003003 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003004 if (err) {
3005 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3006 goto set_trap;
3007 }
3008
3009 if (!old_adj_index_valid) {
3010 /* The trap was set for fib entries, so we have to call
3011 * fib entry update to unset it and use adjacency index.
3012 */
3013 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3014 if (err) {
3015 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3016 goto set_trap;
3017 }
3018 return;
3019 }
3020
3021 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3022 old_adj_index, old_ecmp_size);
3023 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
3024 if (err) {
3025 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3026 goto set_trap;
3027 }
Ido Schimmel77d964e2017-08-02 09:56:05 +02003028
3029 /* Offload state within the group changed, so update the flags. */
3030 mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);
3031
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003032 return;
3033
3034set_trap:
3035 old_adj_index_valid = nh_grp->adj_index_valid;
3036 nh_grp->adj_index_valid = 0;
3037 for (i = 0; i < nh_grp->count; i++) {
3038 nh = &nh_grp->nexthops[i];
3039 nh->offloaded = 0;
3040 }
3041 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3042 if (err)
3043 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3044 if (old_adj_index_valid)
3045 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
3046}
3047
3048static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3049 bool removing)
3050{
Petr Machata213666a2017-07-31 09:27:30 +02003051 if (!removing)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003052 nh->should_offload = 1;
Petr Machata213666a2017-07-31 09:27:30 +02003053 else if (nh->offloaded)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003054 nh->should_offload = 0;
3055 nh->update = 1;
3056}
3057
3058static void
3059mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3060 struct mlxsw_sp_neigh_entry *neigh_entry,
3061 bool removing)
3062{
3063 struct mlxsw_sp_nexthop *nh;
3064
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003065 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3066 neigh_list_node) {
3067 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3068 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3069 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003070}
3071
Ido Schimmel9665b742017-02-08 11:16:42 +01003072static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003073 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003074{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003075 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003076 return;
3077
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003078 nh->rif = rif;
3079 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01003080}
3081
3082static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3083{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003084 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003085 return;
3086
3087 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003088 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01003089}
3090
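/* Resolve (or create) the neighbour behind an Ethernet nexthop, link
 * the nexthop onto the neighbour's list so that neighbour events keep
 * it up to date, and derive the initial offload state from the current
 * NUD state.
 */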
Ido Schimmela8c97012017-02-08 11:16:35 +01003091static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3092 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003093{
3094 struct mlxsw_sp_neigh_entry *neigh_entry;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003095 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003096 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003097 int err;
3098
Ido Schimmelad178c82017-02-08 11:16:40 +01003099 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01003100 return 0;
3101
Jiri Pirko33b13412016-11-10 12:31:04 +01003102	/* Take a reference on the neigh here to ensure that it is
Petr Machata8de3c172017-07-31 09:27:25 +02003103	 * not destroyed before the nexthop entry is finished.
Jiri Pirko33b13412016-11-10 12:31:04 +01003104 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01003105 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01003106 */
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003107 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01003108 if (!n) {
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003109 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3110 nh->rif->dev);
Ido Schimmela8c97012017-02-08 11:16:35 +01003111 if (IS_ERR(n))
3112 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003113 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01003114 }
3115 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3116 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003117 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3118 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003119 err = -EINVAL;
3120 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003121 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003122 }
Yotam Gigib2157142016-07-05 11:27:51 +02003123
3124 /* If that is the first nexthop connected to that neigh, add to
3125 * nexthop_neighs_list
3126 */
3127 if (list_empty(&neigh_entry->nexthop_list))
3128 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02003129 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02003130
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003131 nh->neigh_entry = neigh_entry;
3132 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3133 read_lock_bh(&n->lock);
3134 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01003135 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003136 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01003137 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003138
3139 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003140
3141err_neigh_entry_create:
3142 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003143 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003144}
3145
Ido Schimmela8c97012017-02-08 11:16:35 +01003146static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3147 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003148{
3149 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01003150 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003151
Ido Schimmelb8399a12017-02-08 11:16:33 +01003152 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01003153 return;
3154 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01003155
Ido Schimmel58312122016-12-23 09:32:50 +01003156 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003157 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01003158 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02003159
3160 /* If that is the last nexthop connected to that neigh, remove from
3161 * nexthop_neighs_list
3162 */
Ido Schimmele58be792017-02-08 11:16:28 +01003163 if (list_empty(&neigh_entry->nexthop_list))
3164 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02003165
Ido Schimmel5c8802f2017-02-06 16:20:13 +01003166 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3167 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3168
3169 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01003170}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003171
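/* An IP-in-IP nexthop is resolved against the offloaded tunnel entry of
 * the encapsulating netdevice (ol_dev) rather than against a neighbour;
 * it can be offloaded as soon as that entry exists.
 */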
Petr Machata1012b9a2017-09-02 23:49:23 +02003172static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
Petr Machata1012b9a2017-09-02 23:49:23 +02003173 struct mlxsw_sp_nexthop *nh,
3174 struct net_device *ol_dev)
3175{
3176 if (!nh->nh_grp->gateway || nh->ipip_entry)
3177 return 0;
3178
Petr Machata4cccb732017-10-16 16:26:39 +02003179 nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
3180 if (!nh->ipip_entry)
3181 return -ENOENT;
Petr Machata1012b9a2017-09-02 23:49:23 +02003182
3183 __mlxsw_sp_nexthop_neigh_update(nh, false);
3184 return 0;
3185}
3186
3187static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3188 struct mlxsw_sp_nexthop *nh)
3189{
3190 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3191
3192 if (!ipip_entry)
3193 return;
3194
3195 __mlxsw_sp_nexthop_neigh_update(nh, true);
Petr Machata1012b9a2017-09-02 23:49:23 +02003196 nh->ipip_entry = NULL;
3197}
3198
3199static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3200 const struct fib_nh *fib_nh,
3201 enum mlxsw_sp_ipip_type *p_ipipt)
3202{
3203 struct net_device *dev = fib_nh->nh_dev;
3204
3205 return dev &&
3206 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3207 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3208}
3209
Petr Machata35225e42017-09-02 23:49:22 +02003210static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3211 struct mlxsw_sp_nexthop *nh)
3212{
3213 switch (nh->type) {
3214 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3215 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3216 mlxsw_sp_nexthop_rif_fini(nh);
3217 break;
Petr Machata1012b9a2017-09-02 23:49:23 +02003218 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
Petr Machatade0f43c2017-10-02 12:14:57 +02003219 mlxsw_sp_nexthop_rif_fini(nh);
Petr Machata1012b9a2017-09-02 23:49:23 +02003220 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3221 break;
Petr Machata35225e42017-09-02 23:49:22 +02003222 }
3223}
3224
3225static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3226 struct mlxsw_sp_nexthop *nh,
3227 struct fib_nh *fib_nh)
3228{
Petr Machata1012b9a2017-09-02 23:49:23 +02003229 struct mlxsw_sp_router *router = mlxsw_sp->router;
Petr Machata35225e42017-09-02 23:49:22 +02003230 struct net_device *dev = fib_nh->nh_dev;
Petr Machata1012b9a2017-09-02 23:49:23 +02003231 enum mlxsw_sp_ipip_type ipipt;
Petr Machata35225e42017-09-02 23:49:22 +02003232 struct mlxsw_sp_rif *rif;
3233 int err;
3234
Petr Machata1012b9a2017-09-02 23:49:23 +02003235 if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
3236 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
3237 MLXSW_SP_L3_PROTO_IPV4)) {
3238 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
Petr Machata4cccb732017-10-16 16:26:39 +02003239 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
Petr Machatade0f43c2017-10-02 12:14:57 +02003240 if (err)
3241 return err;
3242 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
3243 return 0;
Petr Machata1012b9a2017-09-02 23:49:23 +02003244 }
3245
Petr Machata35225e42017-09-02 23:49:22 +02003246 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3247 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3248 if (!rif)
3249 return 0;
3250
3251 mlxsw_sp_nexthop_rif_init(nh, rif);
3252 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3253 if (err)
3254 goto err_neigh_init;
3255
3256 return 0;
3257
3258err_neigh_init:
3259 mlxsw_sp_nexthop_rif_fini(nh);
3260 return err;
3261}
3262
3263static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3264 struct mlxsw_sp_nexthop *nh)
3265{
3266 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3267}
3268
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003269static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3270 struct mlxsw_sp_nexthop_group *nh_grp,
3271 struct mlxsw_sp_nexthop *nh,
3272 struct fib_nh *fib_nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003273{
3274 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003275 struct in_device *in_dev;
Ido Schimmela8c97012017-02-08 11:16:35 +01003276 int err;
3277
3278 nh->nh_grp = nh_grp;
3279 nh->key.fib_nh = fib_nh;
Ido Schimmel408bd942017-10-22 23:11:46 +02003280#ifdef CONFIG_IP_ROUTE_MULTIPATH
3281 nh->nh_weight = fib_nh->nh_weight;
3282#else
3283 nh->nh_weight = 1;
3284#endif
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003285 memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
Ido Schimmela8c97012017-02-08 11:16:35 +01003286 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3287 if (err)
3288 return err;
3289
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003290 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003291 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3292
Ido Schimmel97989ee2017-03-10 08:53:38 +01003293 if (!dev)
3294 return 0;
3295
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003296 in_dev = __in_dev_get_rtnl(dev);
3297 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3298 fib_nh->nh_flags & RTNH_F_LINKDOWN)
3299 return 0;
3300
Petr Machata35225e42017-09-02 23:49:22 +02003301 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmela8c97012017-02-08 11:16:35 +01003302 if (err)
3303 goto err_nexthop_neigh_init;
3304
3305 return 0;
3306
3307err_nexthop_neigh_init:
3308 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
3309 return err;
3310}
3311
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003312static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
3313 struct mlxsw_sp_nexthop *nh)
Ido Schimmela8c97012017-02-08 11:16:35 +01003314{
Petr Machata35225e42017-09-02 23:49:22 +02003315 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Arkadi Sharshevskydbe45982017-09-25 10:32:23 +02003316 list_del(&nh->router_list_node);
Arkadi Sharshevskya5390272017-09-25 10:32:28 +02003317 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003318 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003319}
3320
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003321static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
3322 unsigned long event, struct fib_nh *fib_nh)
Ido Schimmelad178c82017-02-08 11:16:40 +01003323{
3324 struct mlxsw_sp_nexthop_key key;
3325 struct mlxsw_sp_nexthop *nh;
Ido Schimmelad178c82017-02-08 11:16:40 +01003326
Ido Schimmel9011b672017-05-16 19:38:25 +02003327 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01003328 return;
3329
3330 key.fib_nh = fib_nh;
3331 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
3332 if (WARN_ON_ONCE(!nh))
3333 return;
3334
Ido Schimmelad178c82017-02-08 11:16:40 +01003335 switch (event) {
3336 case FIB_EVENT_NH_ADD:
Petr Machata35225e42017-09-02 23:49:22 +02003337 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003338 break;
3339 case FIB_EVENT_NH_DEL:
Petr Machata35225e42017-09-02 23:49:22 +02003340 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01003341 break;
3342 }
3343
3344 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3345}
3346
Petr Machata0c5f1cd2017-11-03 10:03:38 +01003347static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
3348 struct mlxsw_sp_rif *rif)
3349{
3350 struct mlxsw_sp_nexthop *nh;
3351
3352 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
3353 __mlxsw_sp_nexthop_neigh_update(nh, false);
3354 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3355 }
3356}
3357
Ido Schimmel9665b742017-02-08 11:16:42 +01003358static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003359 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01003360{
3361 struct mlxsw_sp_nexthop *nh, *tmp;
3362
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003363 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Petr Machata35225e42017-09-02 23:49:22 +02003364 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01003365 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3366 }
3367}
3368
Petr Machata9b014512017-09-02 23:49:20 +02003369static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
3370 const struct fib_info *fi)
3371{
Petr Machata1012b9a2017-09-02 23:49:23 +02003372 return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
3373 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
Petr Machata9b014512017-09-02 23:49:20 +02003374}
3375
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003376static struct mlxsw_sp_nexthop_group *
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003377mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003378{
3379 struct mlxsw_sp_nexthop_group *nh_grp;
3380 struct mlxsw_sp_nexthop *nh;
3381 struct fib_nh *fib_nh;
3382 size_t alloc_size;
3383 int i;
3384 int err;
3385
3386 alloc_size = sizeof(*nh_grp) +
3387 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
3388 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3389 if (!nh_grp)
3390 return ERR_PTR(-ENOMEM);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003391 nh_grp->priv = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003392 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmel58adf2c2017-07-18 10:10:19 +02003393 nh_grp->neigh_tbl = &arp_tbl;
3394
Petr Machata9b014512017-09-02 23:49:20 +02003395 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003396 nh_grp->count = fi->fib_nhs;
Ido Schimmel7387dbb2017-07-12 09:12:53 +02003397 fib_info_hold(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003398 for (i = 0; i < nh_grp->count; i++) {
3399 nh = &nh_grp->nexthops[i];
3400 fib_nh = &fi->fib_nh[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003401 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003402 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003403 goto err_nexthop4_init;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003404 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003405 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
3406 if (err)
3407 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003408 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3409 return nh_grp;
3410
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003411err_nexthop_group_insert:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003412err_nexthop4_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003413 for (i--; i >= 0; i--) {
3414 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003415 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01003416 }
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003417 fib_info_put(fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003418 kfree(nh_grp);
3419 return ERR_PTR(err);
3420}
3421
3422static void
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003423mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
3424 struct mlxsw_sp_nexthop_group *nh_grp)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003425{
3426 struct mlxsw_sp_nexthop *nh;
3427 int i;
3428
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003429 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003430 for (i = 0; i < nh_grp->count; i++) {
3431 nh = &nh_grp->nexthops[i];
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003432 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003433 }
Ido Schimmel58312122016-12-23 09:32:50 +01003434 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3435 WARN_ON_ONCE(nh_grp->adj_index_valid);
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003436 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003437 kfree(nh_grp);
3438}
3439
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003440static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
3441 struct mlxsw_sp_fib_entry *fib_entry,
3442 struct fib_info *fi)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003443{
3444 struct mlxsw_sp_nexthop_group *nh_grp;
3445
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003446 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003447 if (!nh_grp) {
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003448 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003449 if (IS_ERR(nh_grp))
3450 return PTR_ERR(nh_grp);
3451 }
3452 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
3453 fib_entry->nh_group = nh_grp;
3454 return 0;
3455}
3456
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003457static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
3458 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003459{
3460 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3461
3462 list_del(&fib_entry->nexthop_group_node);
3463 if (!list_empty(&nh_grp->fib_list))
3464 return;
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003465 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003466}
3467
Ido Schimmel013b20f2017-02-08 11:16:36 +01003468static bool
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003469mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3470{
3471 struct mlxsw_sp_fib4_entry *fib4_entry;
3472
3473 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
3474 common);
3475 return !fib4_entry->tos;
3476}
3477
3478static bool
Ido Schimmel013b20f2017-02-08 11:16:36 +01003479mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
3480{
3481 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
3482
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003483 switch (fib_entry->fib_node->fib->proto) {
3484 case MLXSW_SP_L3_PROTO_IPV4:
3485 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
3486 return false;
3487 break;
3488 case MLXSW_SP_L3_PROTO_IPV6:
3489 break;
3490 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01003491
Ido Schimmel013b20f2017-02-08 11:16:36 +01003492 switch (fib_entry->type) {
3493 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
3494 return !!nh_group->adj_index_valid;
3495 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01003496 return !!nh_group->nh_rif;
Petr Machata4607f6d2017-09-02 23:49:25 +02003497 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3498 return true;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003499 default:
3500 return false;
3501 }
3502}
3503
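/* Find the group member that represents the given IPv6 route by
 * matching the egress device and the gateway address.
 */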
Ido Schimmel428b8512017-08-03 13:28:28 +02003504static struct mlxsw_sp_nexthop *
3505mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3506 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3507{
3508 int i;
3509
3510 for (i = 0; i < nh_grp->count; i++) {
3511 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3512 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3513
3514 if (nh->rif && nh->rif->dev == rt->dst.dev &&
3515 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
3516 &rt->rt6i_gateway))
3517 return nh;
3519 }
3520
3521 return NULL;
3522}
3523
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003524static void
3525mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3526{
3527 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3528 int i;
3529
Petr Machata4607f6d2017-09-02 23:49:25 +02003530 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
3531 fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP) {
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003532 nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3533 return;
3534 }
3535
3536 for (i = 0; i < nh_grp->count; i++) {
3537 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3538
3539 if (nh->offloaded)
3540 nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
3541 else
3542 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3543 }
3544}
3545
3546static void
3547mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3548{
3549 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3550 int i;
3551
3552 for (i = 0; i < nh_grp->count; i++) {
3553 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3554
3555 nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
3556 }
3557}
3558
Ido Schimmel428b8512017-08-03 13:28:28 +02003559static void
3560mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3561{
3562 struct mlxsw_sp_fib6_entry *fib6_entry;
3563 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3564
3565 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3566 common);
3567
3568 if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
3569 list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
Ido Schimmelfe400792017-08-15 09:09:49 +02003570 list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003571 return;
3572 }
3573
3574 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3575 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3576 struct mlxsw_sp_nexthop *nh;
3577
3578 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3579 if (nh && nh->offloaded)
Ido Schimmelfe400792017-08-15 09:09:49 +02003580 mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003581 else
Ido Schimmelfe400792017-08-15 09:09:49 +02003582 mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003583 }
3584}
3585
3586static void
3587mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3588{
3589 struct mlxsw_sp_fib6_entry *fib6_entry;
3590 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3591
3592 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
3593 common);
3594 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3595 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3596
Ido Schimmelfe400792017-08-15 09:09:49 +02003597 rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD;
Ido Schimmel428b8512017-08-03 13:28:28 +02003598 }
3599}
3600
Ido Schimmel013b20f2017-02-08 11:16:36 +01003601static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
3602{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003603 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003604 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003605 mlxsw_sp_fib4_entry_offload_set(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003606 break;
3607 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003608 mlxsw_sp_fib6_entry_offload_set(fib_entry);
3609 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003610 }
3611}
3612
3613static void
3614mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
3615{
Ido Schimmel76610eb2017-03-10 08:53:41 +01003616 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01003617 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel3984d1a2017-08-02 09:56:03 +02003618 mlxsw_sp_fib4_entry_offload_unset(fib_entry);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003619 break;
3620 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel428b8512017-08-03 13:28:28 +02003621 mlxsw_sp_fib6_entry_offload_unset(fib_entry);
3622 break;
Ido Schimmel013b20f2017-02-08 11:16:36 +01003623 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01003624}
3625
3626static void
3627mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
3628 enum mlxsw_reg_ralue_op op, int err)
3629{
3630 switch (op) {
3631 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
Ido Schimmel013b20f2017-02-08 11:16:36 +01003632 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
3633 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
3634 if (err)
3635 return;
Ido Schimmel1353ee72017-08-02 09:56:04 +02003636 if (mlxsw_sp_fib_entry_should_offload(fib_entry))
Ido Schimmel013b20f2017-02-08 11:16:36 +01003637 mlxsw_sp_fib_entry_offload_set(fib_entry);
Petr Machata85f44a12017-10-02 12:21:58 +02003638 else
Ido Schimmel013b20f2017-02-08 11:16:36 +01003639 mlxsw_sp_fib_entry_offload_unset(fib_entry);
3640 return;
3641 default:
3642 return;
3643 }
3644}
3645
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003646static void
3647mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
3648 const struct mlxsw_sp_fib_entry *fib_entry,
3649 enum mlxsw_reg_ralue_op op)
3650{
3651 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
3652 enum mlxsw_reg_ralxx_protocol proto;
3653 u32 *p_dip;
3654
3655 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
3656
3657 switch (fib->proto) {
3658 case MLXSW_SP_L3_PROTO_IPV4:
3659 p_dip = (u32 *) fib_entry->fib_node->key.addr;
3660 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
3661 fib_entry->fib_node->key.prefix_len,
3662 *p_dip);
3663 break;
3664 case MLXSW_SP_L3_PROTO_IPV6:
3665 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
3666 fib_entry->fib_node->key.prefix_len,
3667 fib_entry->fib_node->key.addr);
3668 break;
3669 }
3670}
3671
3672static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
3673 struct mlxsw_sp_fib_entry *fib_entry,
3674 enum mlxsw_reg_ralue_op op)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003675{
3676 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003677 enum mlxsw_reg_ralue_trap_action trap_action;
3678 u16 trap_id = 0;
3679 u32 adjacency_index = 0;
3680 u16 ecmp_size = 0;
3681
3682 /* In case the nexthop group adjacency index is valid, use it
 3683	 * with the provided ECMP size. Otherwise, set up a trap and pass
 3684	 * traffic to the kernel.
3685 */
Ido Schimmel4b411472017-02-08 11:16:37 +01003686 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003687 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
3688 adjacency_index = fib_entry->nh_group->adj_index;
3689 ecmp_size = fib_entry->nh_group->ecmp_size;
3690 } else {
3691 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3692 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3693 }
3694
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003695 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003696 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
3697 adjacency_index, ecmp_size);
3698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3699}
3700
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003701static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
3702 struct mlxsw_sp_fib_entry *fib_entry,
3703 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003704{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003705 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003706 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003707 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel70ad3502017-02-08 11:16:38 +01003708 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003709 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003710
3711 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
3712 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003713 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01003714 } else {
3715 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
3716 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
3717 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02003718
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003719 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003720 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
3721 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003722 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3723}
3724
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003725static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
3726 struct mlxsw_sp_fib_entry *fib_entry,
3727 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003728{
3729 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirko61c503f2016-07-04 08:23:11 +02003730
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003731 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003732 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3733 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
3734}
3735
Petr Machata4607f6d2017-09-02 23:49:25 +02003736static int
3737mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
3738 struct mlxsw_sp_fib_entry *fib_entry,
3739 enum mlxsw_reg_ralue_op op)
3740{
3741 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
3742 const struct mlxsw_sp_ipip_ops *ipip_ops;
3743
3744 if (WARN_ON(!ipip_entry))
3745 return -EINVAL;
3746
3747 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3748 return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
3749 fib_entry->decap.tunnel_index);
3750}
3751
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003752static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3753 struct mlxsw_sp_fib_entry *fib_entry,
3754 enum mlxsw_reg_ralue_op op)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003755{
3756 switch (fib_entry->type) {
3757 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003758 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003759 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003760 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003761 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003762 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
Petr Machata4607f6d2017-09-02 23:49:25 +02003763 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
3764 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
3765 fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003766 }
3767 return -EINVAL;
3768}
3769
3770static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
3771 struct mlxsw_sp_fib_entry *fib_entry,
3772 enum mlxsw_reg_ralue_op op)
3773{
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003774 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
Ido Schimmel013b20f2017-02-08 11:16:36 +01003775
Ido Schimmel013b20f2017-02-08 11:16:36 +01003776 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
Ido Schimmel9dbf4d72017-07-18 10:10:24 +02003777
Ido Schimmel013b20f2017-02-08 11:16:36 +01003778 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003779}
3780
3781static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
3782 struct mlxsw_sp_fib_entry *fib_entry)
3783{
Jiri Pirko7146da32016-09-01 10:37:41 +02003784 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3785 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02003786}
3787
3788static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
3789 struct mlxsw_sp_fib_entry *fib_entry)
3790{
3791 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
3792 MLXSW_REG_RALUE_OP_WRITE_DELETE);
3793}
3794
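/* Map the kernel route type to a fib entry type: local routes that
 * terminate an offloaded IP-in-IP tunnel become decap entries, other
 * local and broadcast routes trap to the CPU, blackhole / unreachable /
 * prohibit routes use the lower-priority local trap, and unicast routes
 * are programmed as remote (via the adjacency table) when they have a
 * gateway or IPIP nexthop, and as local otherwise.
 */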
Jiri Pirko61c503f2016-07-04 08:23:11 +02003795static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01003796mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
3797 const struct fib_entry_notifier_info *fen_info,
3798 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02003799{
Petr Machata4607f6d2017-09-02 23:49:25 +02003800 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
3801 struct net_device *dev = fen_info->fi->fib_dev;
3802 struct mlxsw_sp_ipip_entry *ipip_entry;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003803 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02003804
Ido Schimmel97989ee2017-03-10 08:53:38 +01003805 switch (fen_info->type) {
Ido Schimmel97989ee2017-03-10 08:53:38 +01003806 case RTN_LOCAL:
Petr Machata4607f6d2017-09-02 23:49:25 +02003807 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
3808 MLXSW_SP_L3_PROTO_IPV4, dip);
3809 if (ipip_entry) {
3810 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
3811 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
3812 fib_entry,
3813 ipip_entry);
3814 }
3815 /* fall through */
3816 case RTN_BROADCAST:
Jiri Pirko61c503f2016-07-04 08:23:11 +02003817 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3818 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003819 case RTN_UNREACHABLE: /* fall through */
3820 case RTN_BLACKHOLE: /* fall through */
3821 case RTN_PROHIBIT:
3822 /* Packets hitting these routes need to be trapped, but
3823 * can do so with a lower priority than packets directed
3824 * at the host, so use action type local instead of trap.
3825 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003826 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003827 return 0;
3828 case RTN_UNICAST:
Petr Machata9b014512017-09-02 23:49:20 +02003829 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
Ido Schimmel97989ee2017-03-10 08:53:38 +01003830 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
Petr Machata9b014512017-09-02 23:49:20 +02003831 else
3832 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01003833 return 0;
3834 default:
3835 return -EINVAL;
3836 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02003837}
3838
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003839static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003840mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
3841 struct mlxsw_sp_fib_node *fib_node,
3842 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02003843{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003844 struct mlxsw_sp_fib4_entry *fib4_entry;
Jiri Pirko5b004412016-09-01 10:37:40 +02003845 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003846 int err;
3847
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003848 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
3849 if (!fib4_entry)
3850 return ERR_PTR(-ENOMEM);
3851 fib_entry = &fib4_entry->common;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003852
3853 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
3854 if (err)
3855 goto err_fib4_entry_type_set;
3856
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003857 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003858 if (err)
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003859 goto err_nexthop4_group_get;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003860
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003861 fib4_entry->prio = fen_info->fi->fib_priority;
3862 fib4_entry->tb_id = fen_info->tb_id;
3863 fib4_entry->type = fen_info->type;
3864 fib4_entry->tos = fen_info->tos;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003865
3866 fib_entry->fib_node = fib_node;
3867
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003868 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003869
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003870err_nexthop4_group_get:
Ido Schimmel9aecce12017-02-09 10:28:42 +01003871err_fib4_entry_type_set:
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003872 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003873 return ERR_PTR(err);
3874}
3875
3876static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003877 struct mlxsw_sp_fib4_entry *fib4_entry)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003878{
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02003879 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003880 kfree(fib4_entry);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003881}
3882
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003883static struct mlxsw_sp_fib4_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01003884mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3885 const struct fib_entry_notifier_info *fen_info)
3886{
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003887 struct mlxsw_sp_fib4_entry *fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003888 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel160e22a2017-07-18 10:10:20 +02003889 struct mlxsw_sp_fib *fib;
3890 struct mlxsw_sp_vr *vr;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003891
Ido Schimmel160e22a2017-07-18 10:10:20 +02003892 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
3893 if (!vr)
3894 return NULL;
3895 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
3896
3897 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
3898 sizeof(fen_info->dst),
3899 fen_info->dst_len);
3900 if (!fib_node)
Ido Schimmel9aecce12017-02-09 10:28:42 +01003901 return NULL;
3902
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003903 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
3904 if (fib4_entry->tb_id == fen_info->tb_id &&
3905 fib4_entry->tos == fen_info->tos &&
3906 fib4_entry->type == fen_info->type &&
Arkadi Sharshevskyba31d362017-08-14 21:09:19 +02003907 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
3908 fen_info->fi) {
Ido Schimmel4f1c7f12017-07-18 10:10:26 +02003909 return fib4_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003910 }
3911 }
3912
3913 return NULL;
3914}
3915
3916static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
3917 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
3918 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
3919 .key_len = sizeof(struct mlxsw_sp_fib_key),
3920 .automatic_shrinking = true,
3921};
3922
3923static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
3924 struct mlxsw_sp_fib_node *fib_node)
3925{
3926 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
3927 mlxsw_sp_fib_ht_params);
3928}
3929
3930static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
3931 struct mlxsw_sp_fib_node *fib_node)
3932{
3933 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
3934 mlxsw_sp_fib_ht_params);
3935}
3936
3937static struct mlxsw_sp_fib_node *
3938mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
3939 size_t addr_len, unsigned char prefix_len)
3940{
3941 struct mlxsw_sp_fib_key key;
3942
3943 memset(&key, 0, sizeof(key));
3944 memcpy(key.addr, addr, addr_len);
3945 key.prefix_len = prefix_len;
3946 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
3947}
3948
3949static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01003950mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01003951 size_t addr_len, unsigned char prefix_len)
3952{
3953 struct mlxsw_sp_fib_node *fib_node;
3954
3955 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
3956 if (!fib_node)
3957 return NULL;
3958
3959 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01003960 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01003961 memcpy(fib_node->key.addr, addr, addr_len);
3962 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01003963
3964 return fib_node;
3965}
3966
3967static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
3968{
Ido Schimmel9aecce12017-02-09 10:28:42 +01003969 list_del(&fib_node->list);
3970 WARN_ON(!list_empty(&fib_node->entry_list));
3971 kfree(fib_node);
3972}
3973
3974static bool
3975mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
3976 const struct mlxsw_sp_fib_entry *fib_entry)
3977{
3978 return list_first_entry(&fib_node->entry_list,
3979 struct mlxsw_sp_fib_entry, list) == fib_entry;
3980}
3981
Ido Schimmelfc922bb2017-08-14 10:54:05 +02003982static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3983 struct mlxsw_sp_fib *fib,
3984 struct mlxsw_sp_fib_node *fib_node)
3985{
3986 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3987 struct mlxsw_sp_lpm_tree *lpm_tree;
3988 int err;
3989
3990 /* Since the tree is shared between all virtual routers we must
3991 * make sure it contains all the required prefix lengths. This
3992 * can be computed by either adding the new prefix length to the
3993 * existing prefix usage of a bound tree, or by aggregating the
3994 * prefix lengths across all virtual routers and adding the new
3995 * one as well.
3996 */
3997 if (fib->lpm_tree)
3998 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
3999 &fib->lpm_tree->prefix_usage);
4000 else
4001 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
4002 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4003
4004 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4005 fib->proto);
4006 if (IS_ERR(lpm_tree))
4007 return PTR_ERR(lpm_tree);
4008
4009 if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id)
4010 return 0;
4011
4012 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4013 if (err)
4014 return err;
4015
4016 return 0;
4017}
4018
4019static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4020 struct mlxsw_sp_fib *fib)
4021{
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004022 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
4023 return;
4024 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
4025 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
4026 fib->lpm_tree = NULL;
4027}
4028
Ido Schimmel9aecce12017-02-09 10:28:42 +01004029static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
4030{
4031 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004032 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004033
4034 if (fib->prefix_ref_count[prefix_len]++ == 0)
4035 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
4036}
4037
4038static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
4039{
4040 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004041 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01004042
4043 if (--fib->prefix_ref_count[prefix_len] == 0)
4044 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
4045}
4046
Ido Schimmel76610eb2017-03-10 08:53:41 +01004047static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
4048 struct mlxsw_sp_fib_node *fib_node,
4049 struct mlxsw_sp_fib *fib)
4050{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004051 int err;
4052
4053 err = mlxsw_sp_fib_node_insert(fib, fib_node);
4054 if (err)
4055 return err;
4056 fib_node->fib = fib;
4057
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004058 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node);
4059 if (err)
4060 goto err_fib_lpm_tree_link;
Ido Schimmel76610eb2017-03-10 08:53:41 +01004061
4062 mlxsw_sp_fib_node_prefix_inc(fib_node);
4063
4064 return 0;
4065
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004066err_fib_lpm_tree_link:
Ido Schimmel76610eb2017-03-10 08:53:41 +01004067 fib_node->fib = NULL;
4068 mlxsw_sp_fib_node_remove(fib, fib_node);
4069 return err;
4070}
4071
4072static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
4073 struct mlxsw_sp_fib_node *fib_node)
4074{
Ido Schimmel76610eb2017-03-10 08:53:41 +01004075 struct mlxsw_sp_fib *fib = fib_node->fib;
4076
4077 mlxsw_sp_fib_node_prefix_dec(fib_node);
Ido Schimmelfc922bb2017-08-14 10:54:05 +02004078 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib);
Ido Schimmel76610eb2017-03-10 08:53:41 +01004079 fib_node->fib = NULL;
4080 mlxsw_sp_fib_node_remove(fib, fib_node);
4081}
4082
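/* Get-or-create the FIB node for {addr, prefix_len} in the given table.
 * A newly created node is inserted into the FIB's hash table and linked
 * to an LPM tree; mlxsw_sp_fib_node_put() tears it down again once its
 * entry list is empty.
 */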
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}

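/* Entries of a FIB node are kept sorted by table ID and TOS (both in
 * descending order) and then by ascending priority. Return the entry
 * before which the new entry should be inserted, or NULL if it should
 * be placed according to table ID alone.
 */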
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id > new4_entry->tb_id)
			continue;
		if (fib4_entry->tb_id != new4_entry->tb_id)
			break;
		if (fib4_entry->tos > new4_entry->tos)
			continue;
		if (fib4_entry->prio >= new4_entry->prio ||
		    fib4_entry->tos < new4_entry->tos)
			return fib4_entry;
	}

	return NULL;
}

static int
mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
			       struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib4_entry))
		return -EINVAL;

	fib_node = fib4_entry->common.fib_node;
	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
				 common.list) {
		if (fib4_entry->tb_id != new4_entry->tb_id ||
		    fib4_entry->tos != new4_entry->tos ||
		    fib4_entry->prio != new4_entry->prio)
			break;
	}

	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
	return 0;
}

static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
	if (replace && WARN_ON(!fib4_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib4_entry) {
		list_add_tail(&new4_entry->common.list,
			      &fib4_entry->common.list);
	} else {
		struct mlxsw_sp_fib4_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			if (new4_entry->tb_id > last->tb_id)
				break;
			fib4_entry = last;
		}

		if (fib4_entry)
			list_add(&new4_entry->common.list,
				 &fib4_entry->common.list);
		else
			list_add(&new4_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	list_del(&fib4_entry->common.list);
}

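/* Only the first entry in a FIB node's entry list is programmed into
 * the device. The add/del helpers below keep the hardware in sync when
 * the head of the list changes, overwriting the previously offloaded
 * entry rather than deleting it first in order to avoid packet loss.
 */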
static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}

static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib4_entry *fib4_entry,
					 bool replace, bool append)
{
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_node_list_remove(fib4_entry);

	if (fib4_entry->common.type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP)
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, &fib4_entry->common);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib4_entry, common.list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib4_entry))
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
{
	/* Packets with link-local destination IP arriving to the router
	 * are trapped to the CPU, so no need to program specific routes
	 * for them.
	 */
	if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
		return true;

	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->rt6i_flags & RTF_CACHE)
		return true;

	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	rt6_hold(rt);

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
	rt6_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
{
	/* RTF_CACHE routes are ignored */
	return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}

static struct rt6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

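/* Find an existing multipath-capable entry with the same table ID and
 * metric that the new route can be appended to as another nexthop, or
 * NULL if the route should get an entry of its own.
 */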
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
		 */
		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (rt->rt6i_metric < nrt->rt6i_metric)
			continue;
		if (rt->rt6i_metric == nrt->rt6i_metric &&
		    mlxsw_sp_fib6_rt_can_mp(rt))
			return fib6_entry;
		if (rt->rt6i_metric > nrt->rt6i_metric)
			break;
	}

	return NULL;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct rt6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->dst.dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret);
}

static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct rt6_info *rt)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct net_device *dev = rt->dst.dev;
	enum mlxsw_sp_ipip_type ipipt;
	struct mlxsw_sp_rif *rif;
	int err;

	if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
						     MLXSW_SP_L3_PROTO_IPV6)) {
		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
		if (err)
			return err;
		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
		return 0;
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}

static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}

static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct rt6_info *rt)
{
	struct net_device *dev = rt->dst.dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = 1;
	memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct rt6_info *rt)
{
	return rt->rt6i_flags & RTF_GATEWAY ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

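/* Build a nexthop group from the routes of an IPv6 entry: one nexthop
 * is initialized per route in the entry's rt6_list, the group is
 * inserted into the nexthop group table so that identical groups can be
 * shared (see mlxsw_sp_nexthop6_group_get()), and the adjacency entries
 * are written out by the refresh call.
 */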
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	size_t alloc_size;
	int i = 0;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
#if IS_ENABLED(CONFIG_IPV6)
	nh_grp->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}

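/* After routes were added to or removed from the entry's rt6_list, get
 * a nexthop group matching the new list and move the entry over to it.
 * The old group is kept until the device was updated and is destroyed
 * only if no other entry uses it.
 */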
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6))
		return PTR_ERR(mlxsw_sp_rt6);

	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6++;

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
	if (WARN_ON(!mlxsw_sp_rt6))
		return;

	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}

static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct rt6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->rt6i_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6)) {
		err = PTR_ERR(mlxsw_sp_rt6);
		goto err_rt6_create;
	}

	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, mlxsw_sp_rt6->rt);

	INIT_LIST_HEAD(&fib6_entry->rt6_list);
	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6 = 1;
	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
err_rt6_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
			    mlxsw_sp_fib6_rt_can_mp(nrt))
				return fib6_entry;
			if (mlxsw_sp_fib6_rt_can_mp(nrt))
				fallback = fallback ?: fib6_entry;
		}
		if (rt->rt6i_metric > nrt->rt6i_metric)
			return fallback ?: fib6_entry;
	}

	return fallback;
}

static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);

	if (replace && WARN_ON(!fib6_entry))
		return -EINVAL;

	if (fib6_entry) {
		list_add_tail(&new6_entry->common.list,
			      &fib6_entry->common.list);
	} else {
		struct mlxsw_sp_fib6_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
				break;
			fib6_entry = last;
		}

		if (fib6_entry)
			list_add(&new6_entry->common.list,
				 &fib6_entry->common.list);
		else
			list_add(&new6_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	list_del(&fib6_entry->common.list);
}

static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib6_entry *fib6_entry,
					 bool replace)
{
	int err;

	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
	return err;
}

static void
mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
					    sizeof(rt->rt6i_dst.addr),
					    rt->rt6i_dst.plen);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
		    rt->rt6i_metric == iter_rt->rt6i_metric &&
		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
			return fib6_entry;
	}

	return NULL;
}

static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *replaced;

	if (!replace)
		return;

	replaced = list_next_entry(fib6_entry, common.list);

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct rt6_info *rt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->rt6i_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
					 &rt->rt6i_dst.addr,
					 sizeof(rt->rt6i_dst.addr),
					 rt->rt6i_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* Before creating a new entry, try to append route to an existing
	 * multipath entry.
	 */
	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
	if (fib6_entry) {
		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
		if (err)
			goto err_fib6_entry_nexthop_add;
		return 0;
	}

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
	if (err)
		goto err_fib6_node_entry_link;

	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);

	return 0;

err_fib6_node_entry_link:
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (WARN_ON(!fib6_entry))
		return;

	/* If route is part of a multipath entry, but not the last one
	 * removed, then only reduce its nexthop group.
	 */
	if (!list_is_singular(&fib6_entry->rt6_list)) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

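/* In the abort mode, an LPM tree holding only a default route is bound
 * to every virtual router, and that route traps packets to the CPU, so
 * the kernel keeps forwarding while hardware offload is disabled.
 */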
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_reg_ralxx_protocol proto,
					    u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack(ralue_pl, proto,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
	mlxsw_sp_vr_put(vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
	mlxsw_sp_vr_put(vr);
}

static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
	int err;

	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
					       MLXSW_SP_LPM_TREE_MIN);
	if (err)
		return err;

	/* The multicast router code does not need an abort trap as by default,
	 * packets that don't match any routes are trapped to the CPU.
	 */

	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
						MLXSW_SP_LPM_TREE_MIN + 1);
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;

	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
				 common.list) {
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;

	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
				 common.list) {
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_sp_mr_table_flush(vr->mr4_table);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}

static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

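/* FIB notifications may be delivered in atomic context (the notifier is
 * called under RCU), so the actual processing is deferred to a work item
 * that runs under RTNL. Each work item carries a copy of the notifier
 * info plus a reference on the underlying object (fib_info, rt6_info,
 * MFC cache entry or netdevice) so that it cannot be freed before the
 * work runs.
 */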
Ido Schimmel30572242016-12-03 16:45:01 +01005315struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01005316 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01005317 union {
Ido Schimmel428b8512017-08-03 13:28:28 +02005318 struct fib6_entry_notifier_info fen6_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005319 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01005320 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005321 struct fib_nh_notifier_info fnh_info;
Yotam Gigid42b0962017-09-27 08:23:20 +02005322 struct mfc_entry_notifier_info men_info;
5323 struct vif_entry_notifier_info ven_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01005324 };
Ido Schimmel30572242016-12-03 16:45:01 +01005325 struct mlxsw_sp *mlxsw_sp;
5326 unsigned long event;
5327};
5328
Ido Schimmel66a57632017-08-03 13:28:26 +02005329static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005330{
Ido Schimmel30572242016-12-03 16:45:01 +01005331 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01005332 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01005333 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005334 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005335 int err;
5336
Ido Schimmel30572242016-12-03 16:45:01 +01005337 /* Protect internal structures from changes */
5338 rtnl_lock();
5339 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005340 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01005341 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005342 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005343 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01005344 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
5345 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01005346 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005347 if (err)
Ido Schimmelbc65a8a2017-07-18 10:10:25 +02005348 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01005349 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005350 break;
5351 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01005352 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
5353 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005354 break;
David Ahern1f279232017-10-27 17:37:14 -07005355 case FIB_EVENT_RULE_ADD:
5356 /* if we get here, a rule was added that we do not support.
5357 * just do the fib_abort
5358 */
5359 mlxsw_sp_router_fib_abort(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005360 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01005361 case FIB_EVENT_NH_ADD: /* fall through */
5362 case FIB_EVENT_NH_DEL:
Ido Schimmel0e6ea2a2017-07-18 10:10:27 +02005363 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
5364 fib_work->fnh_info.fib_nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01005365 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
5366 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005367 }
Ido Schimmel30572242016-12-03 16:45:01 +01005368 rtnl_unlock();
5369 kfree(fib_work);
5370}
5371
Ido Schimmel66a57632017-08-03 13:28:26 +02005372static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
5373{
Ido Schimmel583419f2017-08-03 13:28:27 +02005374 struct mlxsw_sp_fib_event_work *fib_work =
5375 container_of(work, struct mlxsw_sp_fib_event_work, work);
5376 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005377 bool replace;
Ido Schimmel428b8512017-08-03 13:28:28 +02005378 int err;
Ido Schimmel583419f2017-08-03 13:28:27 +02005379
5380 rtnl_lock();
5381 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005382 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005383 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005384 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel428b8512017-08-03 13:28:28 +02005385 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005386 fib_work->fen6_info.rt, replace);
Ido Schimmel428b8512017-08-03 13:28:28 +02005387 if (err)
5388 mlxsw_sp_router_fib_abort(mlxsw_sp);
5389 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5390 break;
5391 case FIB_EVENT_ENTRY_DEL:
5392 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
5393 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
5394 break;
David Ahern1f279232017-10-27 17:37:14 -07005395 case FIB_EVENT_RULE_ADD:
5396 /* if we get here, a rule was added that we do not support.
5397 * just do the fib_abort
5398 */
5399 mlxsw_sp_router_fib_abort(mlxsw_sp);
Ido Schimmel583419f2017-08-03 13:28:27 +02005400 break;
5401 }
5402 rtnl_unlock();
5403 kfree(fib_work);
Ido Schimmel66a57632017-08-03 13:28:26 +02005404}
5405
Yotam Gigid42b0962017-09-27 08:23:20 +02005406static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
5407{
5408 struct mlxsw_sp_fib_event_work *fib_work =
5409 container_of(work, struct mlxsw_sp_fib_event_work, work);
5410 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Yotam Gigid42b0962017-09-27 08:23:20 +02005411 bool replace;
5412 int err;
5413
5414 rtnl_lock();
5415 switch (fib_work->event) {
5416 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5417 case FIB_EVENT_ENTRY_ADD:
5418 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
5419
5420 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
5421 replace);
5422 if (err)
5423 mlxsw_sp_router_fib_abort(mlxsw_sp);
5424 ipmr_cache_put(fib_work->men_info.mfc);
5425 break;
5426 case FIB_EVENT_ENTRY_DEL:
5427 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
5428 ipmr_cache_put(fib_work->men_info.mfc);
5429 break;
5430 case FIB_EVENT_VIF_ADD:
5431 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
5432 &fib_work->ven_info);
5433 if (err)
5434 mlxsw_sp_router_fib_abort(mlxsw_sp);
5435 dev_put(fib_work->ven_info.dev);
5436 break;
5437 case FIB_EVENT_VIF_DEL:
5438 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
5439 &fib_work->ven_info);
5440 dev_put(fib_work->ven_info.dev);
5441 break;
David Ahern1f279232017-10-27 17:37:14 -07005442 case FIB_EVENT_RULE_ADD:
5443 /* if we get here, a rule was added that we do not support.
5444 * just do the fib_abort
5445 */
5446 mlxsw_sp_router_fib_abort(mlxsw_sp);
Yotam Gigid42b0962017-09-27 08:23:20 +02005447 break;
5448 }
5449 rtnl_unlock();
5450 kfree(fib_work);
5451}
5452
Ido Schimmel66a57632017-08-03 13:28:26 +02005453static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
5454 struct fib_notifier_info *info)
5455{
David Ahern3c75f9b2017-10-18 15:01:38 -07005456 struct fib_entry_notifier_info *fen_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005457 struct fib_nh_notifier_info *fnh_info;
5458
Ido Schimmel66a57632017-08-03 13:28:26 +02005459 switch (fib_work->event) {
5460 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5461 case FIB_EVENT_ENTRY_APPEND: /* fall through */
5462 case FIB_EVENT_ENTRY_ADD: /* fall through */
5463 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005464 fen_info = container_of(info, struct fib_entry_notifier_info,
5465 info);
5466 fib_work->fen_info = *fen_info;
5467 /* Take reference on fib_info to prevent it from being
Ido Schimmel66a57632017-08-03 13:28:26 +02005468 * freed while work is queued. Release it afterwards.
5469 */
5470 fib_info_hold(fib_work->fen_info.fi);
5471 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005472 case FIB_EVENT_NH_ADD: /* fall through */
5473 case FIB_EVENT_NH_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005474 fnh_info = container_of(info, struct fib_nh_notifier_info,
5475 info);
5476 fib_work->fnh_info = *fnh_info;
Ido Schimmel66a57632017-08-03 13:28:26 +02005477 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
5478 break;
5479 }
5480}
5481
5482static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
5483 struct fib_notifier_info *info)
5484{
David Ahern3c75f9b2017-10-18 15:01:38 -07005485 struct fib6_entry_notifier_info *fen6_info;
David Ahern3c75f9b2017-10-18 15:01:38 -07005486
Ido Schimmel583419f2017-08-03 13:28:27 +02005487 switch (fib_work->event) {
Ido Schimmel0a7fd1a2017-08-03 13:28:29 +02005488 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel428b8512017-08-03 13:28:28 +02005489 case FIB_EVENT_ENTRY_ADD: /* fall through */
5490 case FIB_EVENT_ENTRY_DEL:
David Ahern3c75f9b2017-10-18 15:01:38 -07005491 fen6_info = container_of(info, struct fib6_entry_notifier_info,
5492 info);
5493 fib_work->fen6_info = *fen6_info;
Ido Schimmel428b8512017-08-03 13:28:28 +02005494 rt6_hold(fib_work->fen6_info.rt);
5495 break;
Ido Schimmel583419f2017-08-03 13:28:27 +02005496 }
Ido Schimmel66a57632017-08-03 13:28:26 +02005497}
5498
Yotam Gigid42b0962017-09-27 08:23:20 +02005499static void
5500mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
5501 struct fib_notifier_info *info)
5502{
5503 switch (fib_work->event) {
5504 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
5505 case FIB_EVENT_ENTRY_ADD: /* fall through */
5506 case FIB_EVENT_ENTRY_DEL:
5507 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
5508 ipmr_cache_hold(fib_work->men_info.mfc);
5509 break;
5510 case FIB_EVENT_VIF_ADD: /* fall through */
5511 case FIB_EVENT_VIF_DEL:
5512 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
5513 dev_hold(fib_work->ven_info.dev);
5514 break;
David Ahern1f279232017-10-27 17:37:14 -07005515 }
5516}
5517
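/* FIB rule notifications are validated synchronously in the notifier. Only
 * the default rules and l3mdev (VRF) rules can be offloaded; any other rule
 * is reported via extack and leads to the RULE_ADD handling in the work
 * functions above, which aborts FIB offload altogether.
 */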
5518static int mlxsw_sp_router_fib_rule_event(unsigned long event,
5519 struct fib_notifier_info *info,
5520 struct mlxsw_sp *mlxsw_sp)
5521{
5522 struct netlink_ext_ack *extack = info->extack;
5523 struct fib_rule_notifier_info *fr_info;
5524 struct fib_rule *rule;
5525 int err = 0;
5526
5527	/* Nothing to do for rule deletions at the moment. */
5528 if (event == FIB_EVENT_RULE_DEL)
5529 return 0;
5530
5531 if (mlxsw_sp->router->aborted)
5532 return 0;
5533
5534 fr_info = container_of(info, struct fib_rule_notifier_info, info);
5535 rule = fr_info->rule;
5536
5537 switch (info->family) {
5538 case AF_INET:
5539 if (!fib4_rule_default(rule) && !rule->l3mdev)
5540 err = -1;
5541 break;
5542 case AF_INET6:
5543 if (!fib6_rule_default(rule) && !rule->l3mdev)
5544 err = -1;
5545 break;
5546 case RTNL_FAMILY_IPMR:
5547 if (!ipmr_rule_default(rule) && !rule->l3mdev)
5548 err = -1;
Yotam Gigid42b0962017-09-27 08:23:20 +02005549 break;
5550 }
David Ahern1f279232017-10-27 17:37:14 -07005551
5552 if (err < 0)
5553 NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
5554
5555 return err;
Yotam Gigid42b0962017-09-27 08:23:20 +02005556}
5557
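/* Entry point of the FIB notifier block. The per-family helpers above only
 * snapshot the notification and grab the references they need; the device
 * itself is programmed from the scheduled work items, which run under RTNL.
 */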
Ido Schimmel30572242016-12-03 16:45:01 +01005558/* Called with rcu_read_lock() */
5559static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
5560 unsigned long event, void *ptr)
5561{
Ido Schimmel30572242016-12-03 16:45:01 +01005562 struct mlxsw_sp_fib_event_work *fib_work;
5563 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02005564 struct mlxsw_sp_router *router;
David Ahern1f279232017-10-27 17:37:14 -07005565 int err;
Ido Schimmel30572242016-12-03 16:45:01 +01005566
Ido Schimmel8e29f972017-09-15 15:31:07 +02005567 if (!net_eq(info->net, &init_net) ||
Yotam Gigi664375e2017-09-27 08:23:22 +02005568 (info->family != AF_INET && info->family != AF_INET6 &&
5569 info->family != RTNL_FAMILY_IPMR))
Ido Schimmel30572242016-12-03 16:45:01 +01005570 return NOTIFY_DONE;
5571
David Ahern1f279232017-10-27 17:37:14 -07005572 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
5573
5574 switch (event) {
5575 case FIB_EVENT_RULE_ADD: /* fall through */
5576 case FIB_EVENT_RULE_DEL:
5577 err = mlxsw_sp_router_fib_rule_event(event, info,
5578 router->mlxsw_sp);
5579 if (!err)
5580 return NOTIFY_DONE;
5581 }
5582
Ido Schimmel30572242016-12-03 16:45:01 +01005583 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
5584 if (WARN_ON(!fib_work))
5585 return NOTIFY_BAD;
5586
Ido Schimmel7e39d112017-05-16 19:38:28 +02005587 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01005588 fib_work->event = event;
5589
Ido Schimmel66a57632017-08-03 13:28:26 +02005590 switch (info->family) {
5591 case AF_INET:
5592 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
5593 mlxsw_sp_router_fib4_event(fib_work, info);
Ido Schimmel30572242016-12-03 16:45:01 +01005594 break;
Ido Schimmel66a57632017-08-03 13:28:26 +02005595 case AF_INET6:
5596 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
5597 mlxsw_sp_router_fib6_event(fib_work, info);
Ido Schimmelad178c82017-02-08 11:16:40 +01005598 break;
Yotam Gigid42b0962017-09-27 08:23:20 +02005599 case RTNL_FAMILY_IPMR:
5600 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
5601 mlxsw_sp_router_fibmr_event(fib_work, info);
5602 break;
Ido Schimmel30572242016-12-03 16:45:01 +01005603 }
5604
Ido Schimmela0e47612017-02-06 16:20:10 +01005605 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01005606
Jiri Pirkob45f64d2016-09-26 12:52:31 +02005607 return NOTIFY_DONE;
5608}
5609
Ido Schimmel4724ba562017-03-10 08:53:39 +01005610static struct mlxsw_sp_rif *
5611mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
5612 const struct net_device *dev)
5613{
5614 int i;
5615
5616 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005617 if (mlxsw_sp->router->rifs[i] &&
5618 mlxsw_sp->router->rifs[i]->dev == dev)
5619 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01005620
5621 return NULL;
5622}
5623
5624static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
5625{
5626 char ritr_pl[MLXSW_REG_RITR_LEN];
5627 int err;
5628
5629 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
5630 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5631 if (WARN_ON_ONCE(err))
5632 return err;
5633
5634 mlxsw_reg_ritr_enable_set(ritr_pl, false);
5635 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
5636}
5637
5638static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005639 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005640{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005641 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
5642 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
5643 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005644}
5645
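/* Decide whether an address event should change the RIF configuration:
 * create a RIF on NETDEV_UP when none exists yet, and remove it on
 * NETDEV_DOWN only once both the IPv4 and IPv6 address lists are empty and
 * the netdev is not an L3 slave.
 */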
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005646static bool
5647mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
5648 unsigned long event)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005649{
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005650 struct inet6_dev *inet6_dev;
5651 bool addr_list_empty = true;
5652 struct in_device *idev;
5653
Ido Schimmel4724ba562017-03-10 08:53:39 +01005654 switch (event) {
5655 case NETDEV_UP:
Petr Machataf1b1f272017-07-31 09:27:28 +02005656 return rif == NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005657 case NETDEV_DOWN:
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02005658 idev = __in_dev_get_rtnl(dev);
5659 if (idev && idev->ifa_list)
5660 addr_list_empty = false;
5661
5662 inet6_dev = __in6_dev_get(dev);
5663 if (addr_list_empty && inet6_dev &&
5664 !list_empty(&inet6_dev->addr_list))
5665 addr_list_empty = false;
5666
5667 if (rif && addr_list_empty &&
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005668 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005669 return true;
5670 /* It is possible we already removed the RIF ourselves
5671 * if it was assigned to a netdev that is now a bridge
5672 * or LAG slave.
5673 */
5674 return false;
5675 }
5676
5677 return false;
5678}
5679
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005680static enum mlxsw_sp_rif_type
5681mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
5682 const struct net_device *dev)
5683{
5684 enum mlxsw_sp_fid_type type;
5685
Petr Machata6ddb7422017-09-02 23:49:19 +02005686 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
5687 return MLXSW_SP_RIF_TYPE_IPIP_LB;
5688
5689 /* Otherwise RIF type is derived from the type of the underlying FID. */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005690 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
5691 type = MLXSW_SP_FID_TYPE_8021Q;
5692 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
5693 type = MLXSW_SP_FID_TYPE_8021Q;
5694 else if (netif_is_bridge_master(dev))
5695 type = MLXSW_SP_FID_TYPE_8021D;
5696 else
5697 type = MLXSW_SP_FID_TYPE_RFID;
5698
5699 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
5700}
5701
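/* Allocate a free RIF index by linearly scanning the RIF array, which is
 * sized according to the MAX_RIFS resource exposed by the device.
 */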
Ido Schimmelde5ed992017-06-04 16:53:40 +02005702static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005703{
5704 int i;
5705
Ido Schimmelde5ed992017-06-04 16:53:40 +02005706 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
5707 if (!mlxsw_sp->router->rifs[i]) {
5708 *p_rif_index = i;
5709 return 0;
5710 }
5711 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005712
Ido Schimmelde5ed992017-06-04 16:53:40 +02005713 return -ENOBUFS;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005714}
5715
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005716static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
5717 u16 vr_id,
5718 struct net_device *l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005719{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005720 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005721
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005722 rif = kzalloc(rif_size, GFP_KERNEL);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005723 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005724 return NULL;
5725
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005726 INIT_LIST_HEAD(&rif->nexthop_list);
5727 INIT_LIST_HEAD(&rif->neigh_list);
5728 ether_addr_copy(rif->addr, l3_dev->dev_addr);
5729 rif->mtu = l3_dev->mtu;
5730 rif->vr_id = vr_id;
5731 rif->dev = l3_dev;
5732 rif->rif_index = rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005733
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005734 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005735}
5736
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005737struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
5738 u16 rif_index)
5739{
5740 return mlxsw_sp->router->rifs[rif_index];
5741}
5742
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005743u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
5744{
5745 return rif->rif_index;
5746}
5747
Petr Machata92107cf2017-09-02 23:49:28 +02005748u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5749{
5750 return lb_rif->common.rif_index;
5751}
5752
5753u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
5754{
5755 return lb_rif->ul_vr_id;
5756}
5757
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02005758int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
5759{
5760 return rif->dev->ifindex;
5761}
5762
Yotam Gigi91e4d592017-09-19 10:00:19 +02005763const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
5764{
5765 return rif->dev;
5766}
5767
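/* Create a RIF for an L3 netdev: resolve the RIF type and its ops, take a
 * reference on the virtual router bound to the netdev's FIB table, allocate
 * a RIF index, bind a FID if the RIF type uses one (loopback RIFs do not),
 * let the type-specific ops program the device, and finally register the RIF
 * with the multicast router and allocate its counters. Errors unwind in
 * reverse order.
 */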
Ido Schimmel4724ba562017-03-10 08:53:39 +01005768static struct mlxsw_sp_rif *
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005769mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07005770 const struct mlxsw_sp_rif_params *params,
5771 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005772{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005773 u32 tb_id = l3mdev_fib_table(params->dev);
5774 const struct mlxsw_sp_rif_ops *ops;
Petr Machata010cadf2017-09-02 23:49:18 +02005775 struct mlxsw_sp_fid *fid = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005776 enum mlxsw_sp_rif_type type;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005777 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005778 struct mlxsw_sp_vr *vr;
5779 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005780 int err;
5781
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005782 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
5783 ops = mlxsw_sp->router->rif_ops_arr[type];
5784
David Ahernf8fa9b42017-10-18 09:56:56 -07005785 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005786 if (IS_ERR(vr))
5787 return ERR_CAST(vr);
Petr Machata28a04c72017-10-02 12:14:56 +02005788 vr->rif_count++;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005789
Ido Schimmelde5ed992017-06-04 16:53:40 +02005790 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
David Ahernf8fa9b42017-10-18 09:56:56 -07005791 if (err) {
5792 NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
Ido Schimmelde5ed992017-06-04 16:53:40 +02005793 goto err_rif_index_alloc;
David Ahernf8fa9b42017-10-18 09:56:56 -07005794 }
Ido Schimmel4724ba562017-03-10 08:53:39 +01005795
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005796 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
Ido Schimmela13a5942017-05-26 08:37:33 +02005797 if (!rif) {
5798 err = -ENOMEM;
5799 goto err_rif_alloc;
5800 }
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005801 rif->mlxsw_sp = mlxsw_sp;
5802 rif->ops = ops;
Ido Schimmela13a5942017-05-26 08:37:33 +02005803
Petr Machata010cadf2017-09-02 23:49:18 +02005804 if (ops->fid_get) {
5805 fid = ops->fid_get(rif);
5806 if (IS_ERR(fid)) {
5807 err = PTR_ERR(fid);
5808 goto err_fid_get;
5809 }
5810 rif->fid = fid;
Ido Schimmel4d93cee2017-05-26 08:37:34 +02005811 }
5812
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005813 if (ops->setup)
5814 ops->setup(rif, params);
5815
5816 err = ops->configure(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005817 if (err)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005818 goto err_configure;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005819
Yotam Gigid42b0962017-09-27 08:23:20 +02005820 err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
5821 if (err)
5822 goto err_mr_rif_add;
5823
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005824 mlxsw_sp_rif_counters_alloc(rif);
Ido Schimmel5f9efff2017-05-16 19:38:27 +02005825 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005826
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005827 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005828
Yotam Gigid42b0962017-09-27 08:23:20 +02005829err_mr_rif_add:
5830 ops->deconfigure(rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005831err_configure:
Petr Machata010cadf2017-09-02 23:49:18 +02005832 if (fid)
5833 mlxsw_sp_fid_put(fid);
Ido Schimmela1107482017-05-26 08:37:39 +02005834err_fid_get:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005835 kfree(rif);
5836err_rif_alloc:
Ido Schimmelde5ed992017-06-04 16:53:40 +02005837err_rif_index_alloc:
Petr Machata28a04c72017-10-02 12:14:56 +02005838 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005839 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005840 return ERR_PTR(err);
5841}
5842
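/* Tear a RIF down in the reverse order of mlxsw_sp_rif_create(): flush the
 * nexthops and neighbours that use it, drop it from the multicast router,
 * deconfigure it in hardware and release the FID (if any) and the virtual
 * router reference.
 */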
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005843void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005844{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005845 const struct mlxsw_sp_rif_ops *ops = rif->ops;
5846 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
Ido Schimmela1107482017-05-26 08:37:39 +02005847 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005848 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005849
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005850 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005851 vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02005852
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005853 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005854 mlxsw_sp_rif_counters_free(rif);
Yotam Gigid42b0962017-09-27 08:23:20 +02005855 mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005856 ops->deconfigure(rif);
Petr Machata010cadf2017-09-02 23:49:18 +02005857 if (fid)
5858 /* Loopback RIFs are not associated with a FID. */
5859 mlxsw_sp_fid_put(fid);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005860 kfree(rif);
Petr Machata28a04c72017-10-02 12:14:56 +02005861 vr->rif_count--;
Ido Schimmelc9ec53f2017-05-26 08:37:38 +02005862 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005863}
5864
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005865static void
5866mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
5867 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
5868{
5869 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
5870
5871 params->vid = mlxsw_sp_port_vlan->vid;
5872 params->lag = mlxsw_sp_port->lagged;
5873 if (params->lag)
5874 params->lag_id = mlxsw_sp_port->lag_id;
5875 else
5876 params->system_port = mlxsw_sp_port->local_port;
5877}
5878
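/* Join a {port, VID} to the router: find or create the sub-port RIF for the
 * L3 device, map the rFID to the {port, VID}, disable learning and put the
 * VID into forwarding state.
 */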
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005879static int
Ido Schimmela1107482017-05-26 08:37:39 +02005880mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005881 struct net_device *l3_dev,
5882 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005883{
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005884 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005885 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005886 u16 vid = mlxsw_sp_port_vlan->vid;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005887 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02005888 struct mlxsw_sp_fid *fid;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005889 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005890
Ido Schimmel1b8f09a2017-05-26 08:37:36 +02005891 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005892 if (!rif) {
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005893 struct mlxsw_sp_rif_params params = {
5894 .dev = l3_dev,
5895 };
5896
5897 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
David Ahernf8fa9b42017-10-18 09:56:56 -07005898 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01005899 if (IS_ERR(rif))
5900 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005901 }
5902
Ido Schimmela1107482017-05-26 08:37:39 +02005903 /* FID was already created, just take a reference */
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02005904 fid = rif->ops->fid_get(rif);
Ido Schimmela1107482017-05-26 08:37:39 +02005905 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
5906 if (err)
5907 goto err_fid_port_vid_map;
5908
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005909 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005910 if (err)
5911 goto err_port_vid_learning_set;
5912
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005913 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005914 BR_STATE_FORWARDING);
5915 if (err)
5916 goto err_port_vid_stp_set;
5917
Ido Schimmela1107482017-05-26 08:37:39 +02005918 mlxsw_sp_port_vlan->fid = fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005919
Ido Schimmel4724ba562017-03-10 08:53:39 +01005920 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005921
5922err_port_vid_stp_set:
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005923 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005924err_port_vid_learning_set:
Ido Schimmela1107482017-05-26 08:37:39 +02005925 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5926err_fid_port_vid_map:
5927 mlxsw_sp_fid_put(fid);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02005928 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005929}
5930
Ido Schimmela1107482017-05-26 08:37:39 +02005931void
5932mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005933{
Ido Schimmelce95e152017-05-26 08:37:27 +02005934 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005935 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005936 u16 vid = mlxsw_sp_port_vlan->vid;
Ido Schimmelce95e152017-05-26 08:37:27 +02005937
Ido Schimmela1107482017-05-26 08:37:39 +02005938 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
5939 return;
Ido Schimmel4aafc362017-05-26 08:37:25 +02005940
Ido Schimmela1107482017-05-26 08:37:39 +02005941 mlxsw_sp_port_vlan->fid = NULL;
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005942 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
5943 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
Ido Schimmela1107482017-05-26 08:37:39 +02005944 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
5945 /* If router port holds the last reference on the rFID, then the
5946 * associated Sub-port RIF will be destroyed.
5947 */
5948 mlxsw_sp_fid_put(fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005949}
5950
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005951static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
5952 struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005953 unsigned long event, u16 vid,
5954 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005955{
5956 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02005957 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005958
Ido Schimmelce95e152017-05-26 08:37:27 +02005959 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005960 if (WARN_ON(!mlxsw_sp_port_vlan))
5961 return -EINVAL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01005962
5963 switch (event) {
5964 case NETDEV_UP:
Ido Schimmela1107482017-05-26 08:37:39 +02005965 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
David Ahernf8fa9b42017-10-18 09:56:56 -07005966 l3_dev, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005967 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02005968 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005969 break;
5970 }
5971
5972 return 0;
5973}
5974
5975static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005976 unsigned long event,
5977 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005978{
Jiri Pirko2b94e582017-04-18 16:55:37 +02005979 if (netif_is_bridge_port(port_dev) ||
5980 netif_is_lag_port(port_dev) ||
5981 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01005982 return 0;
5983
David Ahernf8fa9b42017-10-18 09:56:56 -07005984 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
5985 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01005986}
5987
5988static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
5989 struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07005990 unsigned long event, u16 vid,
5991 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01005992{
5993 struct net_device *port_dev;
5994 struct list_head *iter;
5995 int err;
5996
5997 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
5998 if (mlxsw_sp_port_dev_check(port_dev)) {
Ido Schimmel7cbecf22017-05-26 08:37:28 +02005999 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
6000 port_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006001 event, vid,
6002 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006003 if (err)
6004 return err;
6005 }
6006 }
6007
6008 return 0;
6009}
6010
6011static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006012 unsigned long event,
6013 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006014{
6015 if (netif_is_bridge_port(lag_dev))
6016 return 0;
6017
David Ahernf8fa9b42017-10-18 09:56:56 -07006018 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
6019 extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006020}
6021
Ido Schimmel4724ba562017-03-10 08:53:39 +01006022static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006023 unsigned long event,
6024 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006025{
6026 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006027 struct mlxsw_sp_rif_params params = {
6028 .dev = l3_dev,
6029 };
Ido Schimmela1107482017-05-26 08:37:39 +02006030 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006031
6032 switch (event) {
6033 case NETDEV_UP:
David Ahernf8fa9b42017-10-18 09:56:56 -07006034 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006035 if (IS_ERR(rif))
6036 return PTR_ERR(rif);
6037 break;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006038 case NETDEV_DOWN:
Ido Schimmela1107482017-05-26 08:37:39 +02006039 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006040 mlxsw_sp_rif_destroy(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006041 break;
6042 }
6043
6044 return 0;
6045}
6046
6047static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006048 unsigned long event,
6049 struct netlink_ext_ack *extack)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006050{
6051 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006052 u16 vid = vlan_dev_vlan_id(vlan_dev);
6053
Ido Schimmel6b27c8a2017-06-28 09:03:12 +03006054 if (netif_is_bridge_port(vlan_dev))
6055 return 0;
6056
Ido Schimmel4724ba562017-03-10 08:53:39 +01006057 if (mlxsw_sp_port_dev_check(real_dev))
Ido Schimmel7cbecf22017-05-26 08:37:28 +02006058 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006059 event, vid, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006060 else if (netif_is_lag_master(real_dev))
6061 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
David Ahernf8fa9b42017-10-18 09:56:56 -07006062 vid, extack);
Ido Schimmelc57529e2017-05-26 08:37:31 +02006063 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006064 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006065
6066 return 0;
6067}
6068
Ido Schimmelb1e45522017-04-30 19:47:14 +03006069static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
David Ahernf8fa9b42017-10-18 09:56:56 -07006070 unsigned long event,
6071 struct netlink_ext_ack *extack)
Ido Schimmelb1e45522017-04-30 19:47:14 +03006072{
6073 if (mlxsw_sp_port_dev_check(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006074 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006075 else if (netif_is_lag_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006076 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006077 else if (netif_is_bridge_master(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006078 return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006079 else if (is_vlan_dev(dev))
David Ahernf8fa9b42017-10-18 09:56:56 -07006080 return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
Ido Schimmelb1e45522017-04-30 19:47:14 +03006081 else
6082 return 0;
6083}
6084
Ido Schimmel4724ba562017-03-10 08:53:39 +01006085int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
6086 unsigned long event, void *ptr)
6087{
6088 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
6089 struct net_device *dev = ifa->ifa_dev->dev;
6090 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006091 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006092 int err = 0;
6093
David Ahern89d5dd22017-10-18 09:56:55 -07006094 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
6095 if (event == NETDEV_UP)
6096 goto out;
6097
6098 mlxsw_sp = mlxsw_sp_lower_get(dev);
6099 if (!mlxsw_sp)
6100 goto out;
6101
6102 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6103 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6104 goto out;
6105
David Ahernf8fa9b42017-10-18 09:56:56 -07006106 err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
David Ahern89d5dd22017-10-18 09:56:55 -07006107out:
6108 return notifier_from_errno(err);
6109}
6110
6111int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
6112 unsigned long event, void *ptr)
6113{
6114 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
6115 struct net_device *dev = ivi->ivi_dev->dev;
6116 struct mlxsw_sp *mlxsw_sp;
6117 struct mlxsw_sp_rif *rif;
6118 int err = 0;
6119
Ido Schimmel4724ba562017-03-10 08:53:39 +01006120 mlxsw_sp = mlxsw_sp_lower_get(dev);
6121 if (!mlxsw_sp)
6122 goto out;
6123
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006124 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006125 if (!mlxsw_sp_rif_should_config(rif, dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01006126 goto out;
6127
David Ahernf8fa9b42017-10-18 09:56:56 -07006128 err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006129out:
6130 return notifier_from_errno(err);
6131}
6132
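/* Unlike the IPv4 case above, IPv6 address notifications are delivered in
 * atomic context, so the event is deferred to a work item that takes RTNL
 * before touching the RIFs.
 */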
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006133struct mlxsw_sp_inet6addr_event_work {
6134 struct work_struct work;
6135 struct net_device *dev;
6136 unsigned long event;
6137};
6138
6139static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
6140{
6141 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
6142 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
6143 struct net_device *dev = inet6addr_work->dev;
6144 unsigned long event = inet6addr_work->event;
6145 struct mlxsw_sp *mlxsw_sp;
6146 struct mlxsw_sp_rif *rif;
6147
6148 rtnl_lock();
6149 mlxsw_sp = mlxsw_sp_lower_get(dev);
6150 if (!mlxsw_sp)
6151 goto out;
6152
6153 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6154 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6155 goto out;
6156
David Ahernf8fa9b42017-10-18 09:56:56 -07006157 __mlxsw_sp_inetaddr_event(dev, event, NULL);
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006158out:
6159 rtnl_unlock();
6160 dev_put(dev);
6161 kfree(inet6addr_work);
6162}
6163
6164/* Called with rcu_read_lock() */
6165int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
6166 unsigned long event, void *ptr)
6167{
6168 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
6169 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
6170 struct net_device *dev = if6->idev->dev;
6171
David Ahern89d5dd22017-10-18 09:56:55 -07006172 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
6173 if (event == NETDEV_UP)
6174 return NOTIFY_DONE;
6175
Arkadi Sharshevsky5ea12372017-07-18 10:10:13 +02006176 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
6177 return NOTIFY_DONE;
6178
6179 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
6180 if (!inet6addr_work)
6181 return NOTIFY_BAD;
6182
6183 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
6184 inet6addr_work->dev = dev;
6185 inet6addr_work->event = event;
6186 dev_hold(dev);
6187 mlxsw_core_schedule_work(&inet6addr_work->work);
6188
6189 return NOTIFY_DONE;
6190}
6191
David Ahern89d5dd22017-10-18 09:56:55 -07006192int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
6193 unsigned long event, void *ptr)
6194{
6195 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
6196 struct net_device *dev = i6vi->i6vi_dev->dev;
6197 struct mlxsw_sp *mlxsw_sp;
6198 struct mlxsw_sp_rif *rif;
6199 int err = 0;
6200
6201 mlxsw_sp = mlxsw_sp_lower_get(dev);
6202 if (!mlxsw_sp)
6203 goto out;
6204
6205 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6206 if (!mlxsw_sp_rif_should_config(rif, dev, event))
6207 goto out;
6208
David Ahernf8fa9b42017-10-18 09:56:56 -07006209 err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
David Ahern89d5dd22017-10-18 09:56:55 -07006210out:
6211 return notifier_from_errno(err);
6212}
6213
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006214static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01006215 const char *mac, int mtu)
6216{
6217 char ritr_pl[MLXSW_REG_RITR_LEN];
6218 int err;
6219
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006220 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006221 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6222 if (err)
6223 return err;
6224
6225 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
6226 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
6227 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
6228 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6229}
6230
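/* Reflect a MAC address or MTU change of a router port netdev: remove the
 * FDB entry for the old MAC, update the RITR register with the new MAC and
 * MTU, install a new FDB entry and propagate an MTU change to the multicast
 * routing table of the RIF's virtual router.
 */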
6231int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
6232{
6233 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006234 struct mlxsw_sp_rif *rif;
Ido Schimmela1107482017-05-26 08:37:39 +02006235 u16 fid_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006236 int err;
6237
6238 mlxsw_sp = mlxsw_sp_lower_get(dev);
6239 if (!mlxsw_sp)
6240 return 0;
6241
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006242 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6243 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01006244 return 0;
Ido Schimmela1107482017-05-26 08:37:39 +02006245 fid_index = mlxsw_sp_fid_index(rif->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006246
Ido Schimmela1107482017-05-26 08:37:39 +02006247 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006248 if (err)
6249 return err;
6250
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006251 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
6252 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006253 if (err)
6254 goto err_rif_edit;
6255
Ido Schimmela1107482017-05-26 08:37:39 +02006256 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006257 if (err)
6258 goto err_rif_fdb_op;
6259
Yotam Gigifd890fe2017-09-27 08:23:21 +02006260 if (rif->mtu != dev->mtu) {
6261 struct mlxsw_sp_vr *vr;
6262
6263		/* The RIF is relevant only to its own mr_table instance: unlike
6264		 * in unicast routing, a RIF cannot be shared between several
6265		 * multicast routing tables.
6266		 */
6267 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6268 mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
6269 }
6270
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006271 ether_addr_copy(rif->addr, dev->dev_addr);
6272 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01006273
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006274 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006275
6276 return 0;
6277
6278err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01006279 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006280err_rif_edit:
Ido Schimmela1107482017-05-26 08:37:39 +02006281 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01006282 return err;
6283}
6284
Ido Schimmelb1e45522017-04-30 19:47:14 +03006285static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
David Ahernf8fa9b42017-10-18 09:56:56 -07006286 struct net_device *l3_dev,
6287 struct netlink_ext_ack *extack)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006288{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006289 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006290
Ido Schimmelb1e45522017-04-30 19:47:14 +03006291 /* If netdev is already associated with a RIF, then we need to
6292 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01006293 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03006294 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6295 if (rif)
David Ahernf8fa9b42017-10-18 09:56:56 -07006296 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006297
David Ahernf8fa9b42017-10-18 09:56:56 -07006298 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006299}
6300
Ido Schimmelb1e45522017-04-30 19:47:14 +03006301static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
6302 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006303{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006304 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01006305
Ido Schimmelb1e45522017-04-30 19:47:14 +03006306 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
6307 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01006308 return;
David Ahernf8fa9b42017-10-18 09:56:56 -07006309 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
Ido Schimmel7179eb52017-03-16 09:08:18 +01006310}
6311
Ido Schimmelb1e45522017-04-30 19:47:14 +03006312int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
6313 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006314{
Ido Schimmelb1e45522017-04-30 19:47:14 +03006315 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
6316 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006317
Ido Schimmelb1e45522017-04-30 19:47:14 +03006318 if (!mlxsw_sp)
6319 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006320
Ido Schimmelb1e45522017-04-30 19:47:14 +03006321 switch (event) {
6322 case NETDEV_PRECHANGEUPPER:
6323 return 0;
6324 case NETDEV_CHANGEUPPER:
David Ahernf8fa9b42017-10-18 09:56:56 -07006325 if (info->linking) {
6326 struct netlink_ext_ack *extack;
6327
6328 extack = netdev_notifier_info_to_extack(&info->info);
6329 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
6330 } else {
Ido Schimmelb1e45522017-04-30 19:47:14 +03006331 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
David Ahernf8fa9b42017-10-18 09:56:56 -07006332 }
Ido Schimmelb1e45522017-04-30 19:47:14 +03006333 break;
6334 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006335
Ido Schimmelb1e45522017-04-30 19:47:14 +03006336 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01006337}
6338
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006339static struct mlxsw_sp_rif_subport *
6340mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
Ido Schimmela1107482017-05-26 08:37:39 +02006341{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006342 return container_of(rif, struct mlxsw_sp_rif_subport, common);
Ido Schimmela1107482017-05-26 08:37:39 +02006343}
6344
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006345static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
6346 const struct mlxsw_sp_rif_params *params)
6347{
6348 struct mlxsw_sp_rif_subport *rif_subport;
6349
6350 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6351 rif_subport->vid = params->vid;
6352 rif_subport->lag = params->lag;
6353 if (params->lag)
6354 rif_subport->lag_id = params->lag_id;
6355 else
6356 rif_subport->system_port = params->system_port;
6357}
6358
6359static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
6360{
6361 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6362 struct mlxsw_sp_rif_subport *rif_subport;
6363 char ritr_pl[MLXSW_REG_RITR_LEN];
6364
6365 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6366 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
Petr Machata9571e822017-09-02 23:49:14 +02006367 rif->rif_index, rif->vr_id, rif->dev->mtu);
6368 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006369 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
6370 rif_subport->lag ? rif_subport->lag_id :
6371 rif_subport->system_port,
6372 rif_subport->vid);
6373
6374 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6375}
6376
6377static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
6378{
Petr Machata010cadf2017-09-02 23:49:18 +02006379 int err;
6380
6381 err = mlxsw_sp_rif_subport_op(rif, true);
6382 if (err)
6383 return err;
6384
6385 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6386 mlxsw_sp_fid_index(rif->fid), true);
6387 if (err)
6388 goto err_rif_fdb_op;
6389
6390 mlxsw_sp_fid_rif_set(rif->fid, rif);
6391 return 0;
6392
6393err_rif_fdb_op:
6394 mlxsw_sp_rif_subport_op(rif, false);
6395 return err;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006396}
6397
6398static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
6399{
Petr Machata010cadf2017-09-02 23:49:18 +02006400 struct mlxsw_sp_fid *fid = rif->fid;
6401
6402 mlxsw_sp_fid_rif_set(fid, NULL);
6403 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6404 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006405 mlxsw_sp_rif_subport_op(rif, false);
6406}
6407
6408static struct mlxsw_sp_fid *
6409mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
6410{
6411 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
6412}
6413
6414static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
6415 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
6416 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
6417 .setup = mlxsw_sp_rif_subport_setup,
6418 .configure = mlxsw_sp_rif_subport_configure,
6419 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
6420 .fid_get = mlxsw_sp_rif_subport_fid_get,
6421};
6422
6423static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
6424 enum mlxsw_reg_ritr_if_type type,
6425 u16 vid_fid, bool enable)
6426{
6427 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6428 char ritr_pl[MLXSW_REG_RITR_LEN];
6429
6430 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
Petr Machata9571e822017-09-02 23:49:14 +02006431 rif->dev->mtu);
6432 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006433 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
6434
6435 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6436}
6437
Yotam Gigib35750f2017-10-09 11:15:33 +02006438u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006439{
6440 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
6441}
6442
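/* Configure a VLAN RIF: program the VLAN router interface via RITR, enable
 * MC and BC flooding towards the router port, install an FDB entry for the
 * RIF's MAC and bind the FID to the RIF. Errors unwind in reverse order.
 */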
6443static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
6444{
6445 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6446 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6447 int err;
6448
6449 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
6450 if (err)
6451 return err;
6452
Ido Schimmel0d284812017-07-18 10:10:12 +02006453 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6454 mlxsw_sp_router_port(mlxsw_sp), true);
6455 if (err)
6456 goto err_fid_mc_flood_set;
6457
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006458 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6459 mlxsw_sp_router_port(mlxsw_sp), true);
6460 if (err)
6461 goto err_fid_bc_flood_set;
6462
Petr Machata010cadf2017-09-02 23:49:18 +02006463 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6464 mlxsw_sp_fid_index(rif->fid), true);
6465 if (err)
6466 goto err_rif_fdb_op;
6467
6468 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006469 return 0;
6470
Petr Machata010cadf2017-09-02 23:49:18 +02006471err_rif_fdb_op:
6472 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6473 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006474err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006475 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6476 mlxsw_sp_router_port(mlxsw_sp), false);
6477err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006478 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6479 return err;
6480}
6481
6482static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
6483{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006484 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006485 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6486 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006487
Petr Machata010cadf2017-09-02 23:49:18 +02006488 mlxsw_sp_fid_rif_set(fid, NULL);
6489 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6490 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006491 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6492 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006493 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6494 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006495 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
6496}
6497
6498static struct mlxsw_sp_fid *
6499mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
6500{
6501 u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;
6502
6503 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
6504}
6505
6506static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
6507 .type = MLXSW_SP_RIF_TYPE_VLAN,
6508 .rif_size = sizeof(struct mlxsw_sp_rif),
6509 .configure = mlxsw_sp_rif_vlan_configure,
6510 .deconfigure = mlxsw_sp_rif_vlan_deconfigure,
6511 .fid_get = mlxsw_sp_rif_vlan_fid_get,
6512};
6513
6514static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
6515{
6516 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6517 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
6518 int err;
6519
6520 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
6521 true);
6522 if (err)
6523 return err;
6524
Ido Schimmel0d284812017-07-18 10:10:12 +02006525 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6526 mlxsw_sp_router_port(mlxsw_sp), true);
6527 if (err)
6528 goto err_fid_mc_flood_set;
6529
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006530 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6531 mlxsw_sp_router_port(mlxsw_sp), true);
6532 if (err)
6533 goto err_fid_bc_flood_set;
6534
Petr Machata010cadf2017-09-02 23:49:18 +02006535 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6536 mlxsw_sp_fid_index(rif->fid), true);
6537 if (err)
6538 goto err_rif_fdb_op;
6539
6540 mlxsw_sp_fid_rif_set(rif->fid, rif);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006541 return 0;
6542
Petr Machata010cadf2017-09-02 23:49:18 +02006543err_rif_fdb_op:
6544 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6545 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006546err_fid_bc_flood_set:
Ido Schimmel0d284812017-07-18 10:10:12 +02006547 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6548 mlxsw_sp_router_port(mlxsw_sp), false);
6549err_fid_mc_flood_set:
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006550 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6551 return err;
6552}
6553
6554static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
6555{
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006556 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
Petr Machata010cadf2017-09-02 23:49:18 +02006557 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6558 struct mlxsw_sp_fid *fid = rif->fid;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006559
Petr Machata010cadf2017-09-02 23:49:18 +02006560 mlxsw_sp_fid_rif_set(fid, NULL);
6561 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
6562 mlxsw_sp_fid_index(fid), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006563 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
6564 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmel0d284812017-07-18 10:10:12 +02006565 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
6566 mlxsw_sp_router_port(mlxsw_sp), false);
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006567 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
6568}
6569
6570static struct mlxsw_sp_fid *
6571mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
6572{
6573 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
6574}
6575
6576static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
6577 .type = MLXSW_SP_RIF_TYPE_FID,
6578 .rif_size = sizeof(struct mlxsw_sp_rif),
6579 .configure = mlxsw_sp_rif_fid_configure,
6580 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
6581 .fid_get = mlxsw_sp_rif_fid_fid_get,
6582};
6583
Petr Machata6ddb7422017-09-02 23:49:19 +02006584static struct mlxsw_sp_rif_ipip_lb *
6585mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
6586{
6587 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
6588}
6589
6590static void
6591mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
6592 const struct mlxsw_sp_rif_params *params)
6593{
6594 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
6595 struct mlxsw_sp_rif_ipip_lb *rif_lb;
6596
6597 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
6598 common);
6599 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
6600 rif_lb->lb_config = params_lb->lb_config;
6601}
6602
6603static int
6604mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif,
6605 struct mlxsw_sp_vr *ul_vr, bool enable)
6606{
6607 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
6608 struct mlxsw_sp_rif *rif = &lb_rif->common;
6609 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6610 char ritr_pl[MLXSW_REG_RITR_LEN];
6611 u32 saddr4;
6612
6613 switch (lb_cf.ul_protocol) {
6614 case MLXSW_SP_L3_PROTO_IPV4:
6615 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
6616 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
6617 rif->rif_index, rif->vr_id, rif->dev->mtu);
6618 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
6619 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
6620 ul_vr->id, saddr4, lb_cf.okey);
6621 break;
6622
6623 case MLXSW_SP_L3_PROTO_IPV6:
6624 return -EAFNOSUPPORT;
6625 }
6626
6627 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6628}
6629
6630static int
6631mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
6632{
6633 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6634 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
6635 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6636 struct mlxsw_sp_vr *ul_vr;
6637 int err;
6638
David Ahernf8fa9b42017-10-18 09:56:56 -07006639 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
Petr Machata6ddb7422017-09-02 23:49:19 +02006640 if (IS_ERR(ul_vr))
6641 return PTR_ERR(ul_vr);
6642
6643 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true);
6644 if (err)
6645 goto err_loopback_op;
6646
6647 lb_rif->ul_vr_id = ul_vr->id;
6648 ++ul_vr->rif_count;
6649 return 0;
6650
6651err_loopback_op:
6652 mlxsw_sp_vr_put(ul_vr);
6653 return err;
6654}
6655
6656static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
6657{
6658 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
6659 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6660 struct mlxsw_sp_vr *ul_vr;
6661
6662 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
6663 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false);
6664
6665 --ul_vr->rif_count;
6666 mlxsw_sp_vr_put(ul_vr);
6667}
6668
6669static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
6670 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
6671 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
6672 .setup = mlxsw_sp_rif_ipip_lb_setup,
6673 .configure = mlxsw_sp_rif_ipip_lb_configure,
6674 .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure,
6675};
6676
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006677static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
6678 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
6679 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
6680 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
Petr Machata6ddb7422017-09-02 23:49:19 +02006681 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006682};
6683
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006684static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
6685{
6686 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
6687
6688 mlxsw_sp->router->rifs = kcalloc(max_rifs,
6689 sizeof(struct mlxsw_sp_rif *),
6690 GFP_KERNEL);
6691 if (!mlxsw_sp->router->rifs)
6692 return -ENOMEM;
Ido Schimmele4f3c1c2017-05-26 08:37:40 +02006693
6694 mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;
6695
Ido Schimmel348b8fc2017-05-16 19:38:29 +02006696 return 0;
6697}
6698
6699static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
6700{
6701 int i;
6702
6703 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6704 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
6705
6706 kfree(mlxsw_sp->router->rifs);
6707}
6708
Petr Machatadcbda282017-10-20 09:16:16 +02006709static int
6710mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
6711{
6712 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
6713
6714 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
6715 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
6716}
6717
Petr Machata38ebc0f2017-09-02 23:49:17 +02006718static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
6719{
6720 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
Petr Machata1012b9a2017-09-02 23:49:23 +02006721 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
Petr Machatadcbda282017-10-20 09:16:16 +02006722 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
Petr Machata38ebc0f2017-09-02 23:49:17 +02006723}
6724
6725static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
6726{
Petr Machata1012b9a2017-09-02 23:49:23 +02006727 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
Petr Machata38ebc0f2017-09-02 23:49:17 +02006728}
6729
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006730static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
6731{
Ido Schimmel7e39d112017-05-16 19:38:28 +02006732 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006733
6734 /* Flush pending FIB notifications and then flush the device's
6735 * table before requesting another dump. The FIB notification
6736 * block is unregistered, so no need to take RTNL.
6737 */
6738 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02006739 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6740 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01006741}
6742
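/* ECMP hash configuration: seed the hash with random bytes and enable the
 * IPv4 / IPv6 header fields used for multipath selection. For IPv4, L4 ports
 * are only added to the hash when the multipath hash policy sysctl asks for
 * L4 hashing.
 */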
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
{
	bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	get_random_bytes(&seed, sizeof(seed));
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(recr2_pl);
	mlxsw_sp_mp6_hash_init(recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

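/* Program the router general configuration register (RGCR): enable IPv4 and
 * IPv6 routing and cap the number of router interfaces (RIFs) at the maximum
 * reported by the device.
 */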
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

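/* Top-level router initialization: allocate the router state, bring up the
 * sub-blocks (RIFs, IP-in-IP, nexthop tables, LPM trees, multicast routing,
 * virtual routers, neighbour handling, ECMP hashing) in dependency order and
 * register the netevent and FIB notifiers last, once everything they may
 * touch is ready. Errors unwind in strict reverse order.
 */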
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
err_mp_hash_init:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

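/* Tear the router down in the reverse order of mlxsw_sp_router_init(). */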
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}