/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"

struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;

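/* Per-ASIC router state: the RIF array, the virtual routers, the neighbour
 * and nexthop hash tables, the LPM tree pool and the delayed works used for
 * neighbour activity dumping and unresolved nexthop probing.
 */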
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
};

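/* Router interface (RIF). Tracks the nexthops and neighbour entries using
 * it, the backing netdev and FID, and the optional ingress and egress
 * packet counters.
 */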
struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *f;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

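/* Read-modify-write of the RITR register: query the current RIF
 * configuration, then bind (or unbind) the given counter in the requested
 * direction and write the register back.
 */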
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

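/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the requested direction. The counter is returned to the pool
 * on any failure.
 */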
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

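/* Prefix usage is tracked as a bitmap with one bit per possible prefix
 * length. An IPv6 address is the longest supported prefix, hence 128 bits.
 */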
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

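/* Program the tree structure into the device via the RALST register: the
 * longest used prefix length is taken as the root bin and each used prefix
 * length (except 0) is chained to the previously written one.
 */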
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

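/* Get a reference to an LPM tree matching the given protocol and prefix
 * usage; an existing tree is reused when possible, otherwise a new one is
 * created from the unused pool.
 */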
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->tb_id = tb_id;
	return vr;
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

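/* Make sure the FIB is bound to an LPM tree that covers the requested
 * prefix usage, rebinding it to a new tree if the current one does not.
 */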
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might be still good
		 * for us if our requirement is subset of the prefixes used
		 * in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}

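/* Get the virtual router for a kernel table ID, creating it on first use.
 * mlxsw_sp_vr_put() destroys it once it has no RIFs and no FIB nodes left.
 */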
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}

}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

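/* A single RAUHTD response may not hold the entire dump. Returns true when
 * the last response was completely filled, meaning another query is needed.
 */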
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

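/* Dump the active neighbour entries from the device via the RAUHTD register
 * and report the activity to the kernel, so that offloaded entries are kept
 * alive in the kernel's neighbour table.
 */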
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take the RTNL mutex here to prevent the lists from changing */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over the nexthop neighbours, find those that are unresolved
	 * and send ARP on them. This solves the chicken-and-egg problem where
	 * a nexthop would not get offloaded until its neighbour is resolved,
	 * but the neighbour would never get resolved if traffic is flowing in
	 * HW using a different nexthop.
	 *
	 * Take the RTNL mutex here to prevent the lists from changing.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

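/* Process a neighbour update in process context: snapshot the neighbour
 * state under its lock and then, under RTNL, create, update or destroy the
 * corresponding hardware entry and the nexthops using it.
 */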
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}

Ido Schimmelc53b8e12017-02-08 11:16:30 +01001276struct mlxsw_sp_nexthop_key {
1277 struct fib_nh *fib_nh;
1278};
1279
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001280struct mlxsw_sp_nexthop {
1281 struct list_head neigh_list_node; /* member of neigh entry list */
Ido Schimmel9665b742017-02-08 11:16:42 +01001282 struct list_head rif_list_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001283 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
1284 * this belongs to
1285 */
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001286 struct rhash_head ht_node;
1287 struct mlxsw_sp_nexthop_key key;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001288 struct mlxsw_sp_rif *rif;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001289 u8 should_offload:1, /* set indicates this neigh is connected and
1290 * should be put to KVD linear area of this group.
1291 */
1292 offloaded:1, /* set in case the neigh is actually put into
1293 * KVD linear area of this group.
1294 */
1295 update:1; /* set indicates that MAC of this neigh should be
1296 * updated in HW
1297 */
1298 struct mlxsw_sp_neigh_entry *neigh_entry;
1299};
1300
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001301struct mlxsw_sp_nexthop_group_key {
1302 struct fib_info *fi;
1303};
1304
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001305struct mlxsw_sp_nexthop_group {
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001306 struct rhash_head ht_node;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001307 struct list_head fib_list; /* list of fib entries that use this group */
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001308 struct mlxsw_sp_nexthop_group_key key;
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001309 u8 adj_index_valid:1,
1310 gateway:1; /* routes using the group use a gateway */
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001311 u32 adj_index;
1312 u16 ecmp_size;
1313 u16 count;
1314 struct mlxsw_sp_nexthop nexthops[0];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001315#define nh_rif nexthops[0].rif
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001316};
1317
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001318static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
1319 .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
1320 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
1321 .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
1322};
1323
1324static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
1325 struct mlxsw_sp_nexthop_group *nh_grp)
1326{
Ido Schimmel9011b672017-05-16 19:38:25 +02001327 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001328 &nh_grp->ht_node,
1329 mlxsw_sp_nexthop_group_ht_params);
1330}
1331
1332static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
1333 struct mlxsw_sp_nexthop_group *nh_grp)
1334{
Ido Schimmel9011b672017-05-16 19:38:25 +02001335 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001336 &nh_grp->ht_node,
1337 mlxsw_sp_nexthop_group_ht_params);
1338}
1339
1340static struct mlxsw_sp_nexthop_group *
1341mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
1342 struct mlxsw_sp_nexthop_group_key key)
1343{
Ido Schimmel9011b672017-05-16 19:38:25 +02001344 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001345 mlxsw_sp_nexthop_group_ht_params);
1346}
1347
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001348static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
1349 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
1350 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
1351 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
1352};
1353
1354static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
1355 struct mlxsw_sp_nexthop *nh)
1356{
Ido Schimmel9011b672017-05-16 19:38:25 +02001357 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001358 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
1359}
1360
1361static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
1362 struct mlxsw_sp_nexthop *nh)
1363{
Ido Schimmel9011b672017-05-16 19:38:25 +02001364 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001365 mlxsw_sp_nexthop_ht_params);
1366}
1367
Ido Schimmelad178c82017-02-08 11:16:40 +01001368static struct mlxsw_sp_nexthop *
1369mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
1370 struct mlxsw_sp_nexthop_key key)
1371{
Ido Schimmel9011b672017-05-16 19:38:25 +02001372 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
Ido Schimmelad178c82017-02-08 11:16:40 +01001373 mlxsw_sp_nexthop_ht_params);
1374}
1375
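/* Tell a single virtual router (via RALEU) to rewrite routes that point at
 * the old adjacency index / ECMP size so that they point at the new ones.
 */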
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001376static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001377 const struct mlxsw_sp_fib *fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001378 u32 adj_index, u16 ecmp_size,
1379 u32 new_adj_index,
1380 u16 new_ecmp_size)
1381{
1382 char raleu_pl[MLXSW_REG_RALEU_LEN];
1383
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001384 mlxsw_reg_raleu_pack(raleu_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001385 (enum mlxsw_reg_ralxx_protocol) fib->proto,
1386 fib->vr->id, adj_index, ecmp_size, new_adj_index,
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001387 new_ecmp_size);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001388 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
1389}
1390
1391static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
1392 struct mlxsw_sp_nexthop_group *nh_grp,
1393 u32 old_adj_index, u16 old_ecmp_size)
1394{
1395 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001396 struct mlxsw_sp_fib *fib = NULL;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001397 int err;
1398
1399 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
Ido Schimmel76610eb2017-03-10 08:53:41 +01001400 if (fib == fib_entry->fib_node->fib)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001401 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001402 fib = fib_entry->fib_node->fib;
1403 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001404 old_adj_index,
1405 old_ecmp_size,
1406 nh_grp->adj_index,
1407 nh_grp->ecmp_size);
1408 if (err)
1409 return err;
1410 }
1411 return 0;
1412}
1413
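/* Write one adjacency entry (RATR) at the given index with the neighbour's
 * MAC address and egress RIF.
 */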
1414static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1415 struct mlxsw_sp_nexthop *nh)
1416{
1417 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
1418 char ratr_pl[MLXSW_REG_RATR_LEN];
1419
1420 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
1421 true, adj_index, neigh_entry->rif);
1422 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
1423 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
1424}
1425
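/* Walk the group's nexthops in order. Each offloadable nexthop occupies one
 * adjacency entry starting at the group's base index; an entry is rewritten
 * when the nexthop is flagged for update or when the block was reallocated.
 */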
1426static int
1427mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
Ido Schimmela59b7e02017-01-23 11:11:42 +01001428 struct mlxsw_sp_nexthop_group *nh_grp,
1429 bool reallocate)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001430{
1431 u32 adj_index = nh_grp->adj_index; /* base */
1432 struct mlxsw_sp_nexthop *nh;
1433 int i;
1434 int err;
1435
1436 for (i = 0; i < nh_grp->count; i++) {
1437 nh = &nh_grp->nexthops[i];
1438
1439 if (!nh->should_offload) {
1440 nh->offloaded = 0;
1441 continue;
1442 }
1443
Ido Schimmela59b7e02017-01-23 11:11:42 +01001444 if (nh->update || reallocate) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001445 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1446 adj_index, nh);
1447 if (err)
1448 return err;
1449 nh->update = 0;
1450 nh->offloaded = 1;
1451 }
1452 adj_index++;
1453 }
1454 return 0;
1455}
1456
1457static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1458 struct mlxsw_sp_fib_entry *fib_entry);
1459
1460static int
1461mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1462 struct mlxsw_sp_nexthop_group *nh_grp)
1463{
1464 struct mlxsw_sp_fib_entry *fib_entry;
1465 int err;
1466
1467 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1468 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1469 if (err)
1470 return err;
1471 }
1472 return 0;
1473}
1474
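/* Re-sync a nexthop group with the device. Count the nexthops that can be
 * offloaded; if the set changed, allocate a new block of adjacency entries,
 * program the neighbour MACs and re-point the routes using the group, either
 * by updating the FIB entries (when the group was previously trapping) or by
 * mass-updating the adjacency index. On any failure, fall back to trapping
 * traffic to the kernel.
 */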
1475static void
1476mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1477 struct mlxsw_sp_nexthop_group *nh_grp)
1478{
1479 struct mlxsw_sp_nexthop *nh;
1480 bool offload_change = false;
1481 u32 adj_index;
1482 u16 ecmp_size = 0;
1483 bool old_adj_index_valid;
1484 u32 old_adj_index;
1485 u16 old_ecmp_size;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001486 int i;
1487 int err;
1488
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001489 if (!nh_grp->gateway) {
1490 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1491 return;
1492 }
1493
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001494 for (i = 0; i < nh_grp->count; i++) {
1495 nh = &nh_grp->nexthops[i];
1496
1497 if (nh->should_offload ^ nh->offloaded) {
1498 offload_change = true;
1499 if (nh->should_offload)
1500 nh->update = 1;
1501 }
1502 if (nh->should_offload)
1503 ecmp_size++;
1504 }
1505 if (!offload_change) {
1506 /* Nothing was added or removed, so no need to reallocate. Just
1507 * update MAC on existing adjacency indexes.
1508 */
Ido Schimmela59b7e02017-01-23 11:11:42 +01001509 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1510 false);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001511 if (err) {
1512 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1513 goto set_trap;
1514 }
1515 return;
1516 }
1517 if (!ecmp_size)
 1518 /* No neigh of this group is connected, so we just set
 1519 * the trap and let everything flow through the kernel.
1520 */
1521 goto set_trap;
1522
Arkadi Sharshevsky13124442017-03-25 08:28:22 +01001523 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
1524 if (err) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001525 /* We ran out of KVD linear space, just set the
 1526 * trap and let everything flow through the kernel.
1527 */
1528 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
1529 goto set_trap;
1530 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001531 old_adj_index_valid = nh_grp->adj_index_valid;
1532 old_adj_index = nh_grp->adj_index;
1533 old_ecmp_size = nh_grp->ecmp_size;
1534 nh_grp->adj_index_valid = 1;
1535 nh_grp->adj_index = adj_index;
1536 nh_grp->ecmp_size = ecmp_size;
Ido Schimmela59b7e02017-01-23 11:11:42 +01001537 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001538 if (err) {
1539 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1540 goto set_trap;
1541 }
1542
1543 if (!old_adj_index_valid) {
1544 /* The trap was set for fib entries, so we have to call
 1545 * fib entry update to unset it and use the adjacency index.
1546 */
1547 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1548 if (err) {
1549 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
1550 goto set_trap;
1551 }
1552 return;
1553 }
1554
1555 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
1556 old_adj_index, old_ecmp_size);
1557 mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
1558 if (err) {
1559 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
1560 goto set_trap;
1561 }
1562 return;
1563
1564set_trap:
1565 old_adj_index_valid = nh_grp->adj_index_valid;
1566 nh_grp->adj_index_valid = 0;
1567 for (i = 0; i < nh_grp->count; i++) {
1568 nh = &nh_grp->nexthops[i];
1569 nh->offloaded = 0;
1570 }
1571 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
1572 if (err)
1573 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
1574 if (old_adj_index_valid)
1575 mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
1576}
1577
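/* Mark a nexthop as (non-)offloadable following a change in its neighbour's
 * state and flag it so its adjacency entry is rewritten on the next refresh.
 */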
1578static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
1579 bool removing)
1580{
1581 if (!removing && !nh->should_offload)
1582 nh->should_offload = 1;
1583 else if (removing && nh->offloaded)
1584 nh->should_offload = 0;
1585 nh->update = 1;
1586}
1587
1588static void
1589mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
1590 struct mlxsw_sp_neigh_entry *neigh_entry,
1591 bool removing)
1592{
1593 struct mlxsw_sp_nexthop *nh;
1594
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001595 list_for_each_entry(nh, &neigh_entry->nexthop_list,
1596 neigh_list_node) {
1597 __mlxsw_sp_nexthop_neigh_update(nh, removing);
1598 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1599 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001600}
1601
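/* Link the nexthop to its RIF, so it can be torn down when the RIF goes
 * away (see mlxsw_sp_nexthop_rif_gone_sync()).
 */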
Ido Schimmel9665b742017-02-08 11:16:42 +01001602static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001603 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001604{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001605 if (nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001606 return;
1607
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001608 nh->rif = rif;
1609 list_add(&nh->rif_list_node, &rif->nexthop_list);
Ido Schimmel9665b742017-02-08 11:16:42 +01001610}
1611
1612static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
1613{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001614 if (!nh->rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001615 return;
1616
1617 list_del(&nh->rif_list_node);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001618 nh->rif = NULL;
Ido Schimmel9665b742017-02-08 11:16:42 +01001619}
1620
Ido Schimmela8c97012017-02-08 11:16:35 +01001621static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
1622 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001623{
1624 struct mlxsw_sp_neigh_entry *neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001625 struct fib_nh *fib_nh = nh->key.fib_nh;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001626 struct neighbour *n;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001627 u8 nud_state, dead;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001628 int err;
1629
Ido Schimmelad178c82017-02-08 11:16:40 +01001630 if (!nh->nh_grp->gateway || nh->neigh_entry)
Ido Schimmelb8399a12017-02-08 11:16:33 +01001631 return 0;
1632
Jiri Pirko33b13412016-11-10 12:31:04 +01001633 /* Take a reference on the neigh here, ensuring that the neigh will
 1634 * not be destroyed before the nexthop entry is finished.
1635 * The reference is taken either in neigh_lookup() or
Ido Schimmelfd76d912017-02-06 16:20:17 +01001636 * in neigh_create() in case n is not found.
Jiri Pirko33b13412016-11-10 12:31:04 +01001637 */
Ido Schimmela8c97012017-02-08 11:16:35 +01001638 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
Jiri Pirko33b13412016-11-10 12:31:04 +01001639 if (!n) {
Ido Schimmela8c97012017-02-08 11:16:35 +01001640 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
1641 if (IS_ERR(n))
1642 return PTR_ERR(n);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001643 neigh_event_send(n, NULL);
Jiri Pirko33b13412016-11-10 12:31:04 +01001644 }
1645 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1646 if (!neigh_entry) {
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001647 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
1648 if (IS_ERR(neigh_entry)) {
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001649 err = -EINVAL;
1650 goto err_neigh_entry_create;
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001651 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001652 }
Yotam Gigib2157142016-07-05 11:27:51 +02001653
1654 /* If that is the first nexthop connected to that neigh, add to
1655 * nexthop_neighs_list
1656 */
1657 if (list_empty(&neigh_entry->nexthop_list))
1658 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
Ido Schimmel9011b672017-05-16 19:38:25 +02001659 &mlxsw_sp->router->nexthop_neighs_list);
Yotam Gigib2157142016-07-05 11:27:51 +02001660
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001661 nh->neigh_entry = neigh_entry;
1662 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
1663 read_lock_bh(&n->lock);
1664 nud_state = n->nud_state;
Ido Schimmel93a87e52016-12-23 09:32:49 +01001665 dead = n->dead;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001666 read_unlock_bh(&n->lock);
Ido Schimmel93a87e52016-12-23 09:32:49 +01001667 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001668
1669 return 0;
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001670
1671err_neigh_entry_create:
1672 neigh_release(n);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001673 return err;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001674}
1675
Ido Schimmela8c97012017-02-08 11:16:35 +01001676static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
1677 struct mlxsw_sp_nexthop *nh)
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001678{
1679 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
Ido Schimmela8c97012017-02-08 11:16:35 +01001680 struct neighbour *n;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001681
Ido Schimmelb8399a12017-02-08 11:16:33 +01001682 if (!neigh_entry)
Ido Schimmela8c97012017-02-08 11:16:35 +01001683 return;
1684 n = neigh_entry->key.n;
Ido Schimmelb8399a12017-02-08 11:16:33 +01001685
Ido Schimmel58312122016-12-23 09:32:50 +01001686 __mlxsw_sp_nexthop_neigh_update(nh, true);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001687 list_del(&nh->neigh_list_node);
Ido Schimmele58be792017-02-08 11:16:28 +01001688 nh->neigh_entry = NULL;
Yotam Gigib2157142016-07-05 11:27:51 +02001689
1690 /* If that is the last nexthop connected to that neigh, remove from
1691 * nexthop_neighs_list
1692 */
Ido Schimmele58be792017-02-08 11:16:28 +01001693 if (list_empty(&neigh_entry->nexthop_list))
1694 list_del(&neigh_entry->nexthop_neighs_list_node);
Yotam Gigib2157142016-07-05 11:27:51 +02001695
Ido Schimmel5c8802f2017-02-06 16:20:13 +01001696 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
1697 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
1698
1699 neigh_release(n);
Ido Schimmela8c97012017-02-08 11:16:35 +01001700}
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001701
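/* Initialize a nexthop: insert it into the nexthop hashtable and, if it has
 * a usable device with a RIF, bind it to the RIF and resolve its neighbour.
 */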
Ido Schimmela8c97012017-02-08 11:16:35 +01001702static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1703 struct mlxsw_sp_nexthop_group *nh_grp,
1704 struct mlxsw_sp_nexthop *nh,
1705 struct fib_nh *fib_nh)
1706{
1707 struct net_device *dev = fib_nh->nh_dev;
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001708 struct in_device *in_dev;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001709 struct mlxsw_sp_rif *rif;
Ido Schimmela8c97012017-02-08 11:16:35 +01001710 int err;
1711
1712 nh->nh_grp = nh_grp;
1713 nh->key.fib_nh = fib_nh;
1714 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
1715 if (err)
1716 return err;
1717
Ido Schimmel97989ee2017-03-10 08:53:38 +01001718 if (!dev)
1719 return 0;
1720
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001721 in_dev = __in_dev_get_rtnl(dev);
1722 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1723 fib_nh->nh_flags & RTNH_F_LINKDOWN)
1724 return 0;
1725
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001726 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1727 if (!rif)
Ido Schimmela8c97012017-02-08 11:16:35 +01001728 return 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001729 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmela8c97012017-02-08 11:16:35 +01001730
1731 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1732 if (err)
1733 goto err_nexthop_neigh_init;
1734
1735 return 0;
1736
1737err_nexthop_neigh_init:
1738 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
1739 return err;
1740}
1741
1742static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1743 struct mlxsw_sp_nexthop *nh)
1744{
1745 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001746 mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01001747 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001748}
1749
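/* Handle FIB_EVENT_NH_ADD / FIB_EVENT_NH_DEL for a single kernel nexthop
 * and refresh the group it belongs to.
 */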
Ido Schimmelad178c82017-02-08 11:16:40 +01001750static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
1751 unsigned long event, struct fib_nh *fib_nh)
1752{
1753 struct mlxsw_sp_nexthop_key key;
1754 struct mlxsw_sp_nexthop *nh;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001755 struct mlxsw_sp_rif *rif;
Ido Schimmelad178c82017-02-08 11:16:40 +01001756
Ido Schimmel9011b672017-05-16 19:38:25 +02001757 if (mlxsw_sp->router->aborted)
Ido Schimmelad178c82017-02-08 11:16:40 +01001758 return;
1759
1760 key.fib_nh = fib_nh;
1761 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
1762 if (WARN_ON_ONCE(!nh))
1763 return;
1764
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001765 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
1766 if (!rif)
Ido Schimmelad178c82017-02-08 11:16:40 +01001767 return;
1768
1769 switch (event) {
1770 case FIB_EVENT_NH_ADD:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001771 mlxsw_sp_nexthop_rif_init(nh, rif);
Ido Schimmelad178c82017-02-08 11:16:40 +01001772 mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
1773 break;
1774 case FIB_EVENT_NH_DEL:
1775 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
Ido Schimmel9665b742017-02-08 11:16:42 +01001776 mlxsw_sp_nexthop_rif_fini(nh);
Ido Schimmelad178c82017-02-08 11:16:40 +01001777 break;
1778 }
1779
1780 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1781}
1782
Ido Schimmel9665b742017-02-08 11:16:42 +01001783static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001784 struct mlxsw_sp_rif *rif)
Ido Schimmel9665b742017-02-08 11:16:42 +01001785{
1786 struct mlxsw_sp_nexthop *nh, *tmp;
1787
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001788 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
Ido Schimmel9665b742017-02-08 11:16:42 +01001789 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
1790 mlxsw_sp_nexthop_rif_fini(nh);
1791 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
1792 }
1793}
1794
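/* Create a nexthop group for a fib_info, with one mlxsw_sp_nexthop per
 * kernel nexthop. The group is a gateway group when the route's nexthops
 * have link scope, i.e. use a gateway.
 */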
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001795static struct mlxsw_sp_nexthop_group *
1796mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
1797{
1798 struct mlxsw_sp_nexthop_group *nh_grp;
1799 struct mlxsw_sp_nexthop *nh;
1800 struct fib_nh *fib_nh;
1801 size_t alloc_size;
1802 int i;
1803 int err;
1804
1805 alloc_size = sizeof(*nh_grp) +
1806 fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
1807 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
1808 if (!nh_grp)
1809 return ERR_PTR(-ENOMEM);
1810 INIT_LIST_HEAD(&nh_grp->fib_list);
Ido Schimmelb3e8d1e2017-02-08 11:16:32 +01001811 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001812 nh_grp->count = fi->fib_nhs;
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001813 nh_grp->key.fi = fi;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001814 for (i = 0; i < nh_grp->count; i++) {
1815 nh = &nh_grp->nexthops[i];
1816 fib_nh = &fi->fib_nh[i];
1817 err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
1818 if (err)
1819 goto err_nexthop_init;
1820 }
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001821 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
1822 if (err)
1823 goto err_nexthop_group_insert;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001824 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1825 return nh_grp;
1826
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001827err_nexthop_group_insert:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001828err_nexthop_init:
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001829 for (i--; i >= 0; i--) {
1830 nh = &nh_grp->nexthops[i];
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001831 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
Ido Schimmeldf6dd79b2017-02-08 14:36:49 +01001832 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001833 kfree(nh_grp);
1834 return ERR_PTR(err);
1835}
1836
1837static void
1838mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
1839 struct mlxsw_sp_nexthop_group *nh_grp)
1840{
1841 struct mlxsw_sp_nexthop *nh;
1842 int i;
1843
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001844 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001845 for (i = 0; i < nh_grp->count; i++) {
1846 nh = &nh_grp->nexthops[i];
1847 mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
1848 }
Ido Schimmel58312122016-12-23 09:32:50 +01001849 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
1850 WARN_ON_ONCE(nh_grp->adj_index_valid);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001851 kfree(nh_grp);
1852}
1853
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001854static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
1855 struct mlxsw_sp_fib_entry *fib_entry,
1856 struct fib_info *fi)
1857{
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001858 struct mlxsw_sp_nexthop_group_key key;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001859 struct mlxsw_sp_nexthop_group *nh_grp;
1860
Ido Schimmele9ad5e72017-02-08 11:16:29 +01001861 key.fi = fi;
1862 nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001863 if (!nh_grp) {
1864 nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
1865 if (IS_ERR(nh_grp))
1866 return PTR_ERR(nh_grp);
1867 }
1868 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
1869 fib_entry->nh_group = nh_grp;
1870 return 0;
1871}
1872
1873static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
1874 struct mlxsw_sp_fib_entry *fib_entry)
1875{
1876 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
1877
1878 list_del(&fib_entry->nexthop_group_node);
1879 if (!list_empty(&nh_grp->fib_list))
1880 return;
1881 mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
1882}
1883
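/* A FIB entry can only be offloaded when its TOS is zero and, depending on
 * its type, when its nexthop group has a valid adjacency index (remote) or
 * a RIF (local).
 */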
Ido Schimmel013b20f2017-02-08 11:16:36 +01001884static bool
1885mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
1886{
1887 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
1888
Ido Schimmel9aecce12017-02-09 10:28:42 +01001889 if (fib_entry->params.tos)
1890 return false;
1891
Ido Schimmel013b20f2017-02-08 11:16:36 +01001892 switch (fib_entry->type) {
1893 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
1894 return !!nh_group->adj_index_valid;
1895 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
Ido Schimmel70ad3502017-02-08 11:16:38 +01001896 return !!nh_group->nh_rif;
Ido Schimmel013b20f2017-02-08 11:16:36 +01001897 default:
1898 return false;
1899 }
1900}
1901
1902static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
1903{
1904 fib_entry->offloaded = true;
1905
Ido Schimmel76610eb2017-03-10 08:53:41 +01001906 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001907 case MLXSW_SP_L3_PROTO_IPV4:
1908 fib_info_offload_inc(fib_entry->nh_group->key.fi);
1909 break;
1910 case MLXSW_SP_L3_PROTO_IPV6:
1911 WARN_ON_ONCE(1);
1912 }
1913}
1914
1915static void
1916mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
1917{
Ido Schimmel76610eb2017-03-10 08:53:41 +01001918 switch (fib_entry->fib_node->fib->proto) {
Ido Schimmel013b20f2017-02-08 11:16:36 +01001919 case MLXSW_SP_L3_PROTO_IPV4:
1920 fib_info_offload_dec(fib_entry->nh_group->key.fi);
1921 break;
1922 case MLXSW_SP_L3_PROTO_IPV6:
1923 WARN_ON_ONCE(1);
1924 }
1925
1926 fib_entry->offloaded = false;
1927}
1928
1929static void
1930mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
1931 enum mlxsw_reg_ralue_op op, int err)
1932{
1933 switch (op) {
1934 case MLXSW_REG_RALUE_OP_WRITE_DELETE:
1935 if (!fib_entry->offloaded)
1936 return;
1937 return mlxsw_sp_fib_entry_offload_unset(fib_entry);
1938 case MLXSW_REG_RALUE_OP_WRITE_WRITE:
1939 if (err)
1940 return;
1941 if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1942 !fib_entry->offloaded)
1943 mlxsw_sp_fib_entry_offload_set(fib_entry);
1944 else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
1945 fib_entry->offloaded)
1946 mlxsw_sp_fib_entry_offload_unset(fib_entry);
1947 return;
1948 default:
1949 return;
1950 }
1951}
1952
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001953static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
1954 struct mlxsw_sp_fib_entry *fib_entry,
1955 enum mlxsw_reg_ralue_op op)
1956{
1957 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel76610eb2017-03-10 08:53:41 +01001958 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01001959 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001960 enum mlxsw_reg_ralue_trap_action trap_action;
1961 u16 trap_id = 0;
1962 u32 adjacency_index = 0;
1963 u16 ecmp_size = 0;
1964
1965 /* In case the nexthop group adjacency index is valid, use it
 1966 * with the provided ECMP size. Otherwise, set up a trap and pass
 1967 * traffic to the kernel.
1968 */
Ido Schimmel4b411472017-02-08 11:16:37 +01001969 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001970 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
1971 adjacency_index = fib_entry->nh_group->adj_index;
1972 ecmp_size = fib_entry->nh_group->ecmp_size;
1973 } else {
1974 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
1975 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
1976 }
1977
Ido Schimmel1a9234e662016-09-19 08:29:26 +02001978 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01001979 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
1980 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01001981 *p_dip);
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02001982 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
1983 adjacency_index, ecmp_size);
1984 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1985}
1986
Jiri Pirko61c503f2016-07-04 08:23:11 +02001987static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
1988 struct mlxsw_sp_fib_entry *fib_entry,
1989 enum mlxsw_reg_ralue_op op)
1990{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001991 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
Ido Schimmel76610eb2017-03-10 08:53:41 +01001992 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001993 enum mlxsw_reg_ralue_trap_action trap_action;
Jiri Pirko61c503f2016-07-04 08:23:11 +02001994 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01001995 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001996 u16 trap_id = 0;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01001997 u16 rif_index = 0;
Ido Schimmel70ad3502017-02-08 11:16:38 +01001998
1999 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2000 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002001 rif_index = rif->rif_index;
Ido Schimmel70ad3502017-02-08 11:16:38 +01002002 } else {
2003 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2004 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2005 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02002006
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002007 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002008 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2009 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002010 *p_dip);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002011 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2012 rif_index);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002013 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2014}
2015
2016static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
2017 struct mlxsw_sp_fib_entry *fib_entry,
2018 enum mlxsw_reg_ralue_op op)
2019{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002020 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002021 char ralue_pl[MLXSW_REG_RALUE_LEN];
Ido Schimmel9aecce12017-02-09 10:28:42 +01002022 u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002023
Ido Schimmel1a9234e662016-09-19 08:29:26 +02002024 mlxsw_reg_ralue_pack4(ralue_pl,
Ido Schimmel76610eb2017-03-10 08:53:41 +01002025 (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
2026 fib->vr->id, fib_entry->fib_node->key.prefix_len,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002027 *p_dip);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002028 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2029 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2030}
2031
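/* Program an IPv4 FIB entry according to its type: remote entries point at
 * the group's adjacency block, local entries at a RIF, and trap entries
 * punt packets to the CPU (ip2me).
 */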
2032static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
2033 struct mlxsw_sp_fib_entry *fib_entry,
2034 enum mlxsw_reg_ralue_op op)
2035{
2036 switch (fib_entry->type) {
2037 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002038 return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002039 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2040 return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
2041 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2042 return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
2043 }
2044 return -EINVAL;
2045}
2046
2047static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2048 struct mlxsw_sp_fib_entry *fib_entry,
2049 enum mlxsw_reg_ralue_op op)
2050{
Ido Schimmel013b20f2017-02-08 11:16:36 +01002051 int err = -EINVAL;
2052
Ido Schimmel76610eb2017-03-10 08:53:41 +01002053 switch (fib_entry->fib_node->fib->proto) {
Jiri Pirko61c503f2016-07-04 08:23:11 +02002054 case MLXSW_SP_L3_PROTO_IPV4:
Ido Schimmel013b20f2017-02-08 11:16:36 +01002055 err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
2056 break;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002057 case MLXSW_SP_L3_PROTO_IPV6:
Ido Schimmel013b20f2017-02-08 11:16:36 +01002058 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002059 }
Ido Schimmel013b20f2017-02-08 11:16:36 +01002060 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2061 return err;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002062}
2063
2064static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2065 struct mlxsw_sp_fib_entry *fib_entry)
2066{
Jiri Pirko7146da32016-09-01 10:37:41 +02002067 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2068 MLXSW_REG_RALUE_OP_WRITE_WRITE);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002069}
2070
2071static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2072 struct mlxsw_sp_fib_entry *fib_entry)
2073{
2074 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2075 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2076}
2077
Jiri Pirko61c503f2016-07-04 08:23:11 +02002078static int
Ido Schimmel013b20f2017-02-08 11:16:36 +01002079mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2080 const struct fib_entry_notifier_info *fen_info,
2081 struct mlxsw_sp_fib_entry *fib_entry)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002082{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002083 struct fib_info *fi = fen_info->fi;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002084
Ido Schimmel97989ee2017-03-10 08:53:38 +01002085 switch (fen_info->type) {
2086 case RTN_BROADCAST: /* fall through */
2087 case RTN_LOCAL:
Jiri Pirko61c503f2016-07-04 08:23:11 +02002088 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2089 return 0;
Ido Schimmel97989ee2017-03-10 08:53:38 +01002090 case RTN_UNREACHABLE: /* fall through */
2091 case RTN_BLACKHOLE: /* fall through */
2092 case RTN_PROHIBIT:
2093 /* Packets hitting these routes need to be trapped, but
2094 * can do so with a lower priority than packets directed
2095 * at the host, so use action type local instead of trap.
2096 */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002097 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
Ido Schimmel97989ee2017-03-10 08:53:38 +01002098 return 0;
2099 case RTN_UNICAST:
2100 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2101 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2102 else
2103 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2104 return 0;
2105 default:
2106 return -EINVAL;
2107 }
Jiri Pirkoa7ff87a2016-07-05 11:27:50 +02002108}
2109
Jiri Pirko5b004412016-09-01 10:37:40 +02002110static struct mlxsw_sp_fib_entry *
Ido Schimmel9aecce12017-02-09 10:28:42 +01002111mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2112 struct mlxsw_sp_fib_node *fib_node,
2113 const struct fib_entry_notifier_info *fen_info)
Jiri Pirko5b004412016-09-01 10:37:40 +02002114{
2115 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002116 int err;
2117
2118 fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
2119 if (!fib_entry) {
2120 err = -ENOMEM;
2121 goto err_fib_entry_alloc;
2122 }
2123
2124 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
2125 if (err)
2126 goto err_fib4_entry_type_set;
2127
2128 err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
2129 if (err)
2130 goto err_nexthop_group_get;
2131
2132 fib_entry->params.prio = fen_info->fi->fib_priority;
2133 fib_entry->params.tb_id = fen_info->tb_id;
2134 fib_entry->params.type = fen_info->type;
2135 fib_entry->params.tos = fen_info->tos;
2136
2137 fib_entry->fib_node = fib_node;
2138
2139 return fib_entry;
2140
2141err_nexthop_group_get:
2142err_fib4_entry_type_set:
2143 kfree(fib_entry);
2144err_fib_entry_alloc:
2145 return ERR_PTR(err);
2146}
2147
2148static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2149 struct mlxsw_sp_fib_entry *fib_entry)
2150{
2151 mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
2152 kfree(fib_entry);
2153}
2154
2155static struct mlxsw_sp_fib_node *
2156mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2157 const struct fib_entry_notifier_info *fen_info);
2158
2159static struct mlxsw_sp_fib_entry *
2160mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2161 const struct fib_entry_notifier_info *fen_info)
2162{
2163 struct mlxsw_sp_fib_entry *fib_entry;
2164 struct mlxsw_sp_fib_node *fib_node;
2165
2166 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2167 if (IS_ERR(fib_node))
2168 return NULL;
2169
2170 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2171 if (fib_entry->params.tb_id == fen_info->tb_id &&
2172 fib_entry->params.tos == fen_info->tos &&
2173 fib_entry->params.type == fen_info->type &&
2174 fib_entry->nh_group->key.fi == fen_info->fi) {
2175 return fib_entry;
2176 }
2177 }
2178
2179 return NULL;
2180}
2181
2182static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2183 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2184 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2185 .key_len = sizeof(struct mlxsw_sp_fib_key),
2186 .automatic_shrinking = true,
2187};
2188
2189static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2190 struct mlxsw_sp_fib_node *fib_node)
2191{
2192 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2193 mlxsw_sp_fib_ht_params);
2194}
2195
2196static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2197 struct mlxsw_sp_fib_node *fib_node)
2198{
2199 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2200 mlxsw_sp_fib_ht_params);
2201}
2202
2203static struct mlxsw_sp_fib_node *
2204mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2205 size_t addr_len, unsigned char prefix_len)
2206{
2207 struct mlxsw_sp_fib_key key;
2208
2209 memset(&key, 0, sizeof(key));
2210 memcpy(key.addr, addr, addr_len);
2211 key.prefix_len = prefix_len;
2212 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2213}
2214
2215static struct mlxsw_sp_fib_node *
Ido Schimmel76610eb2017-03-10 08:53:41 +01002216mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002217 size_t addr_len, unsigned char prefix_len)
2218{
2219 struct mlxsw_sp_fib_node *fib_node;
2220
2221 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2222 if (!fib_node)
2223 return NULL;
2224
2225 INIT_LIST_HEAD(&fib_node->entry_list);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002226 list_add(&fib_node->list, &fib->node_list);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002227 memcpy(fib_node->key.addr, addr, addr_len);
2228 fib_node->key.prefix_len = prefix_len;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002229
2230 return fib_node;
2231}
2232
2233static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2234{
Ido Schimmel9aecce12017-02-09 10:28:42 +01002235 list_del(&fib_node->list);
2236 WARN_ON(!list_empty(&fib_node->entry_list));
2237 kfree(fib_node);
2238}
2239
2240static bool
2241mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2242 const struct mlxsw_sp_fib_entry *fib_entry)
2243{
2244 return list_first_entry(&fib_node->entry_list,
2245 struct mlxsw_sp_fib_entry, list) == fib_entry;
2246}
2247
2248static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2249{
2250 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002251 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002252
2253 if (fib->prefix_ref_count[prefix_len]++ == 0)
2254 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2255}
2256
2257static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2258{
2259 unsigned char prefix_len = fib_node->key.prefix_len;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002260 struct mlxsw_sp_fib *fib = fib_node->fib;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002261
2262 if (--fib->prefix_ref_count[prefix_len] == 0)
2263 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2264}
2265
Ido Schimmel76610eb2017-03-10 08:53:41 +01002266static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2267 struct mlxsw_sp_fib_node *fib_node,
2268 struct mlxsw_sp_fib *fib)
2269{
2270 struct mlxsw_sp_prefix_usage req_prefix_usage;
2271 struct mlxsw_sp_lpm_tree *lpm_tree;
2272 int err;
2273
2274 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2275 if (err)
2276 return err;
2277 fib_node->fib = fib;
2278
2279 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2280 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2281
2282 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2283 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2284 &req_prefix_usage);
2285 if (err)
2286 goto err_tree_check;
2287 } else {
2288 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2289 fib->proto);
2290 if (IS_ERR(lpm_tree))
2291 return PTR_ERR(lpm_tree);
2292 fib->lpm_tree = lpm_tree;
2293 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2294 if (err)
2295 goto err_tree_bind;
2296 }
2297
2298 mlxsw_sp_fib_node_prefix_inc(fib_node);
2299
2300 return 0;
2301
2302err_tree_bind:
2303 fib->lpm_tree = NULL;
2304 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2305err_tree_check:
2306 fib_node->fib = NULL;
2307 mlxsw_sp_fib_node_remove(fib, fib_node);
2308 return err;
2309}
2310
2311static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2312 struct mlxsw_sp_fib_node *fib_node)
2313{
2314 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2315 struct mlxsw_sp_fib *fib = fib_node->fib;
2316
2317 mlxsw_sp_fib_node_prefix_dec(fib_node);
2318
2319 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2320 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2321 fib->lpm_tree = NULL;
2322 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2323 } else {
2324 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2325 }
2326
2327 fib_node->fib = NULL;
2328 mlxsw_sp_fib_node_remove(fib, fib_node);
2329}
2330
Ido Schimmel9aecce12017-02-09 10:28:42 +01002331static struct mlxsw_sp_fib_node *
2332mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
2333 const struct fib_entry_notifier_info *fen_info)
2334{
2335 struct mlxsw_sp_fib_node *fib_node;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002336 struct mlxsw_sp_fib *fib;
Jiri Pirko5b004412016-09-01 10:37:40 +02002337 struct mlxsw_sp_vr *vr;
2338 int err;
2339
Ido Schimmel76610eb2017-03-10 08:53:41 +01002340 vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
Jiri Pirko5b004412016-09-01 10:37:40 +02002341 if (IS_ERR(vr))
2342 return ERR_CAST(vr);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002343 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirko5b004412016-09-01 10:37:40 +02002344
Ido Schimmel76610eb2017-03-10 08:53:41 +01002345 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002346 sizeof(fen_info->dst),
2347 fen_info->dst_len);
2348 if (fib_node)
2349 return fib_node;
2350
Ido Schimmel76610eb2017-03-10 08:53:41 +01002351 fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
Ido Schimmel9aecce12017-02-09 10:28:42 +01002352 sizeof(fen_info->dst),
2353 fen_info->dst_len);
2354 if (!fib_node) {
Jiri Pirko5b004412016-09-01 10:37:40 +02002355 err = -ENOMEM;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002356 goto err_fib_node_create;
Jiri Pirko5b004412016-09-01 10:37:40 +02002357 }
Jiri Pirko5b004412016-09-01 10:37:40 +02002358
Ido Schimmel76610eb2017-03-10 08:53:41 +01002359 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2360 if (err)
2361 goto err_fib_node_init;
2362
Ido Schimmel9aecce12017-02-09 10:28:42 +01002363 return fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002364
Ido Schimmel76610eb2017-03-10 08:53:41 +01002365err_fib_node_init:
2366 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002367err_fib_node_create:
Ido Schimmel76610eb2017-03-10 08:53:41 +01002368 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002369 return ERR_PTR(err);
2370}
2371
Ido Schimmel9aecce12017-02-09 10:28:42 +01002372static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
2373 struct mlxsw_sp_fib_node *fib_node)
Jiri Pirko5b004412016-09-01 10:37:40 +02002374{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002375 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
Jiri Pirko5b004412016-09-01 10:37:40 +02002376
Ido Schimmel9aecce12017-02-09 10:28:42 +01002377 if (!list_empty(&fib_node->entry_list))
2378 return;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002379 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002380 mlxsw_sp_fib_node_destroy(fib_node);
Ido Schimmel76610eb2017-03-10 08:53:41 +01002381 mlxsw_sp_vr_put(vr);
Jiri Pirko5b004412016-09-01 10:37:40 +02002382}
2383
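/* Entries within a FIB node are kept sorted by table ID, TOS and priority,
 * so that the first entry in the list is the one that should be offloaded.
 * This helper returns the entry before which a new entry with the given
 * parameters should be inserted.
 */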
Ido Schimmel9aecce12017-02-09 10:28:42 +01002384static struct mlxsw_sp_fib_entry *
2385mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2386 const struct mlxsw_sp_fib_entry_params *params)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002387{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002388 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002389
2390 list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
2391 if (fib_entry->params.tb_id > params->tb_id)
2392 continue;
2393 if (fib_entry->params.tb_id != params->tb_id)
2394 break;
2395 if (fib_entry->params.tos > params->tos)
2396 continue;
2397 if (fib_entry->params.prio >= params->prio ||
2398 fib_entry->params.tos < params->tos)
2399 return fib_entry;
2400 }
2401
2402 return NULL;
2403}
2404
Ido Schimmel4283bce2017-02-09 10:28:43 +01002405static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
2406 struct mlxsw_sp_fib_entry *new_entry)
2407{
2408 struct mlxsw_sp_fib_node *fib_node;
2409
2410 if (WARN_ON(!fib_entry))
2411 return -EINVAL;
2412
2413 fib_node = fib_entry->fib_node;
2414 list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
2415 if (fib_entry->params.tb_id != new_entry->params.tb_id ||
2416 fib_entry->params.tos != new_entry->params.tos ||
2417 fib_entry->params.prio != new_entry->params.prio)
2418 break;
2419 }
2420
2421 list_add_tail(&new_entry->list, &fib_entry->list);
2422 return 0;
2423}
2424
Ido Schimmel9aecce12017-02-09 10:28:42 +01002425static int
2426mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002427 struct mlxsw_sp_fib_entry *new_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002428 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002429{
2430 struct mlxsw_sp_fib_entry *fib_entry;
2431
2432 fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
2433
Ido Schimmel4283bce2017-02-09 10:28:43 +01002434 if (append)
2435 return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002436 if (replace && WARN_ON(!fib_entry))
2437 return -EINVAL;
Ido Schimmel4283bce2017-02-09 10:28:43 +01002438
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002439 /* Insert the new entry before the replaced one, so that we can
 2440 * later remove the replaced one.
2441 */
Ido Schimmel9aecce12017-02-09 10:28:42 +01002442 if (fib_entry) {
2443 list_add_tail(&new_entry->list, &fib_entry->list);
2444 } else {
2445 struct mlxsw_sp_fib_entry *last;
2446
2447 list_for_each_entry(last, &fib_node->entry_list, list) {
2448 if (new_entry->params.tb_id > last->params.tb_id)
2449 break;
2450 fib_entry = last;
2451 }
2452
2453 if (fib_entry)
2454 list_add(&new_entry->list, &fib_entry->list);
2455 else
2456 list_add(&new_entry->list, &fib_node->entry_list);
2457 }
2458
2459 return 0;
2460}
2461
2462static void
2463mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
2464{
2465 list_del(&fib_entry->list);
2466}
2467
2468static int
2469mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2470 const struct mlxsw_sp_fib_node *fib_node,
2471 struct mlxsw_sp_fib_entry *fib_entry)
2472{
2473 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2474 return 0;
2475
2476 /* To prevent packet loss, overwrite the previously offloaded
2477 * entry.
2478 */
2479 if (!list_is_singular(&fib_node->entry_list)) {
2480 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2481 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2482
2483 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2484 }
2485
2486 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2487}
2488
2489static void
2490mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2491 const struct mlxsw_sp_fib_node *fib_node,
2492 struct mlxsw_sp_fib_entry *fib_entry)
2493{
2494 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2495 return;
2496
2497 /* Promote the next entry by overwriting the deleted entry */
2498 if (!list_is_singular(&fib_node->entry_list)) {
2499 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2500 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2501
2502 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2503 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2504 return;
2505 }
2506
2507 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2508}
2509
2510static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002511 struct mlxsw_sp_fib_entry *fib_entry,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002512 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002513{
2514 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2515 int err;
2516
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002517 err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
2518 append);
Ido Schimmel9aecce12017-02-09 10:28:42 +01002519 if (err)
2520 return err;
2521
2522 err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
2523 if (err)
2524 goto err_fib4_node_entry_add;
2525
Ido Schimmel9aecce12017-02-09 10:28:42 +01002526 return 0;
2527
2528err_fib4_node_entry_add:
2529 mlxsw_sp_fib4_node_list_remove(fib_entry);
2530 return err;
2531}
2532
2533static void
2534mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2535 struct mlxsw_sp_fib_entry *fib_entry)
2536{
2537 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2538
Ido Schimmel9aecce12017-02-09 10:28:42 +01002539 mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
2540 mlxsw_sp_fib4_node_list_remove(fib_entry);
2541}
2542
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002543static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2544 struct mlxsw_sp_fib_entry *fib_entry,
2545 bool replace)
2546{
2547 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2548 struct mlxsw_sp_fib_entry *replaced;
2549
2550 if (!replace)
2551 return;
2552
 2553 /* We inserted the new entry before the replaced one */
2554 replaced = list_next_entry(fib_entry, list);
2555
2556 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2557 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2558 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2559}
2560
Ido Schimmel9aecce12017-02-09 10:28:42 +01002561static int
2562mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
Ido Schimmel4283bce2017-02-09 10:28:43 +01002563 const struct fib_entry_notifier_info *fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002564 bool replace, bool append)
Ido Schimmel9aecce12017-02-09 10:28:42 +01002565{
2566 struct mlxsw_sp_fib_entry *fib_entry;
2567 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002568 int err;
2569
Ido Schimmel9011b672017-05-16 19:38:25 +02002570 if (mlxsw_sp->router->aborted)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002571 return 0;
2572
Ido Schimmel9aecce12017-02-09 10:28:42 +01002573 fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
2574 if (IS_ERR(fib_node)) {
2575 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2576 return PTR_ERR(fib_node);
2577 }
2578
2579 fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002580 if (IS_ERR(fib_entry)) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002581 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2582 err = PTR_ERR(fib_entry);
2583 goto err_fib4_entry_create;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002584 }
Jiri Pirko61c503f2016-07-04 08:23:11 +02002585
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002586 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
2587 append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002588 if (err) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002589 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2590 goto err_fib4_node_entry_link;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002591 }
Ido Schimmel9aecce12017-02-09 10:28:42 +01002592
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002593 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
2594
Jiri Pirko61c503f2016-07-04 08:23:11 +02002595 return 0;
2596
Ido Schimmel9aecce12017-02-09 10:28:42 +01002597err_fib4_node_entry_link:
2598 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2599err_fib4_entry_create:
2600 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002601 return err;
2602}
2603
Jiri Pirko37956d72016-10-20 16:05:43 +02002604static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2605 struct fib_entry_notifier_info *fen_info)
Jiri Pirko61c503f2016-07-04 08:23:11 +02002606{
Jiri Pirko61c503f2016-07-04 08:23:11 +02002607 struct mlxsw_sp_fib_entry *fib_entry;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002608 struct mlxsw_sp_fib_node *fib_node;
Jiri Pirko61c503f2016-07-04 08:23:11 +02002609
Ido Schimmel9011b672017-05-16 19:38:25 +02002610 if (mlxsw_sp->router->aborted)
Jiri Pirko37956d72016-10-20 16:05:43 +02002611 return;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002612
Ido Schimmel9aecce12017-02-09 10:28:42 +01002613 fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2614 if (WARN_ON(!fib_entry))
Jiri Pirko37956d72016-10-20 16:05:43 +02002615 return;
Ido Schimmel9aecce12017-02-09 10:28:42 +01002616 fib_node = fib_entry->fib_node;
Jiri Pirko5b004412016-09-01 10:37:40 +02002617
Ido Schimmel9aecce12017-02-09 10:28:42 +01002618 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2619 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2620 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
Jiri Pirko61c503f2016-07-04 08:23:11 +02002621}
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002622
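/* Bind every virtual router in use to a minimal LPM tree and install a
 * default (0/0) ip2me route in it, so that after an abort all routed
 * traffic is trapped to the kernel.
 */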
2623static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
2624{
2625 char ralta_pl[MLXSW_REG_RALTA_LEN];
2626 char ralst_pl[MLXSW_REG_RALST_LEN];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002627 int i, err;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002628
2629 mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
2630 MLXSW_SP_LPM_TREE_MIN);
2631 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
2632 if (err)
2633 return err;
2634
2635 mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
2636 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
2637 if (err)
2638 return err;
2639
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002640 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02002641 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002642 char raltb_pl[MLXSW_REG_RALTB_LEN];
2643 char ralue_pl[MLXSW_REG_RALUE_LEN];
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002644
Ido Schimmelb5d90e62017-03-10 08:53:43 +01002645 if (!mlxsw_sp_vr_is_used(vr))
2646 continue;
2647
2648 mlxsw_reg_raltb_pack(raltb_pl, vr->id,
2649 MLXSW_REG_RALXX_PROTOCOL_IPV4,
2650 MLXSW_SP_LPM_TREE_MIN);
2651 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
2652 raltb_pl);
2653 if (err)
2654 return err;
2655
2656 mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
2657 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
2658 0);
2659 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2660 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
2661 ralue_pl);
2662 if (err)
2663 return err;
2664 }
2665
2666 return 0;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002667}
2668
Ido Schimmel9aecce12017-02-09 10:28:42 +01002669static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
2670 struct mlxsw_sp_fib_node *fib_node)
2671{
2672 struct mlxsw_sp_fib_entry *fib_entry, *tmp;
2673
2674 list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
2675 bool do_break = &tmp->list == &fib_node->entry_list;
2676
2677 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
2678 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
2679 mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
2680 /* Break when entry list is empty and node was freed.
2681 * Otherwise, we'll access freed memory in the next
2682 * iteration.
2683 */
2684 if (do_break)
2685 break;
2686 }
2687}
2688
2689static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
2690 struct mlxsw_sp_fib_node *fib_node)
2691{
Ido Schimmel76610eb2017-03-10 08:53:41 +01002692 switch (fib_node->fib->proto) {
Ido Schimmel9aecce12017-02-09 10:28:42 +01002693 case MLXSW_SP_L3_PROTO_IPV4:
2694 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
2695 break;
2696 case MLXSW_SP_L3_PROTO_IPV6:
2697 WARN_ON_ONCE(1);
2698 break;
2699 }
2700}
2701
Ido Schimmel76610eb2017-03-10 08:53:41 +01002702static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
2703 struct mlxsw_sp_vr *vr,
2704 enum mlxsw_sp_l3proto proto)
2705{
2706 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
2707 struct mlxsw_sp_fib_node *fib_node, *tmp;
2708
2709 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
2710 bool do_break = &tmp->list == &fib->node_list;
2711
2712 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
2713 if (do_break)
2714 break;
2715 }
2716}
2717
Ido Schimmelac571de2016-11-14 11:26:32 +01002718static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002719{
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002720 int i;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002721
Jiri Pirkoc1a38312016-10-21 16:07:23 +02002722 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
Ido Schimmel9011b672017-05-16 19:38:25 +02002723 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
Ido Schimmelac571de2016-11-14 11:26:32 +01002724
Ido Schimmel76610eb2017-03-10 08:53:41 +01002725 if (!mlxsw_sp_vr_is_used(vr))
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002726 continue;
Ido Schimmel76610eb2017-03-10 08:53:41 +01002727 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002728 }
Ido Schimmelac571de2016-11-14 11:26:32 +01002729}
2730
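/* Enter aborted mode: flush all FIB state from the device, install the
 * abort trap and ignore subsequent FIB notifications.
 */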
2731static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
2732{
2733 int err;
2734
Ido Schimmel9011b672017-05-16 19:38:25 +02002735 if (mlxsw_sp->router->aborted)
Ido Schimmeld331d302016-11-16 09:51:58 +01002736 return;
2737 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
Ido Schimmelac571de2016-11-14 11:26:32 +01002738 mlxsw_sp_router_fib_flush(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02002739 mlxsw_sp->router->aborted = true;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002740 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
2741 if (err)
2742 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
2743}
2744
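/* FIB notifications are delivered in atomic context, so they are queued as
 * work items and processed under RTNL in mlxsw_sp_router_fib_event_work().
 */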
Ido Schimmel30572242016-12-03 16:45:01 +01002745struct mlxsw_sp_fib_event_work {
Ido Schimmela0e47612017-02-06 16:20:10 +01002746 struct work_struct work;
Ido Schimmelad178c82017-02-08 11:16:40 +01002747 union {
2748 struct fib_entry_notifier_info fen_info;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002749 struct fib_rule_notifier_info fr_info;
Ido Schimmelad178c82017-02-08 11:16:40 +01002750 struct fib_nh_notifier_info fnh_info;
2751 };
Ido Schimmel30572242016-12-03 16:45:01 +01002752 struct mlxsw_sp *mlxsw_sp;
2753 unsigned long event;
2754};
2755
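/* Process a single deferred FIB event. RTNL protects the driver's internal
 * structures; any failure to reflect a route in hardware triggers the
 * abort mechanism. References taken by the notifier are released here.
 */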
2756static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002757{
Ido Schimmel30572242016-12-03 16:45:01 +01002758 struct mlxsw_sp_fib_event_work *fib_work =
Ido Schimmela0e47612017-02-06 16:20:10 +01002759 container_of(work, struct mlxsw_sp_fib_event_work, work);
Ido Schimmel30572242016-12-03 16:45:01 +01002760 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002761 struct fib_rule *rule;
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002762 bool replace, append;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002763 int err;
2764
Ido Schimmel30572242016-12-03 16:45:01 +01002765 /* Protect internal structures from changes */
2766 rtnl_lock();
2767 switch (fib_work->event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002768 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01002769 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002770 case FIB_EVENT_ENTRY_ADD:
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002771 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
Ido Schimmel4283bce2017-02-09 10:28:43 +01002772 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
2773 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002774 replace, append);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002775 if (err)
2776 mlxsw_sp_router_fib4_abort(mlxsw_sp);
Ido Schimmel30572242016-12-03 16:45:01 +01002777 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002778 break;
2779 case FIB_EVENT_ENTRY_DEL:
Ido Schimmel30572242016-12-03 16:45:01 +01002780 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
2781 fib_info_put(fib_work->fen_info.fi);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002782 break;
2783 case FIB_EVENT_RULE_ADD: /* fall through */
2784 case FIB_EVENT_RULE_DEL:
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002785 rule = fib_work->fr_info.rule;
Ido Schimmelc7f6e662017-03-16 09:08:20 +01002786 if (!fib4_rule_default(rule) && !rule->l3mdev)
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002787 mlxsw_sp_router_fib4_abort(mlxsw_sp);
2788 fib_rule_put(rule);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002789 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01002790 case FIB_EVENT_NH_ADD: /* fall through */
2791 case FIB_EVENT_NH_DEL:
2792 mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
2793 fib_work->fnh_info.fib_nh);
2794 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
2795 break;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002796 }
Ido Schimmel30572242016-12-03 16:45:01 +01002797 rtnl_unlock();
2798 kfree(fib_work);
2799}
2800
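/* FIB notifier callback. Runs in an atomic context, so the event is copied
 * into a work item and handled later; references on the fib_info / fib_rule
 * are taken here and dropped by the work item.
 */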
2801/* Called with rcu_read_lock() */
2802static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
2803 unsigned long event, void *ptr)
2804{
Ido Schimmel30572242016-12-03 16:45:01 +01002805 struct mlxsw_sp_fib_event_work *fib_work;
2806 struct fib_notifier_info *info = ptr;
Ido Schimmel7e39d112017-05-16 19:38:28 +02002807 struct mlxsw_sp_router *router;
Ido Schimmel30572242016-12-03 16:45:01 +01002808
2809 if (!net_eq(info->net, &init_net))
2810 return NOTIFY_DONE;
2811
2812 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
2813 if (WARN_ON(!fib_work))
2814 return NOTIFY_BAD;
2815
Ido Schimmela0e47612017-02-06 16:20:10 +01002816 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
Ido Schimmel7e39d112017-05-16 19:38:28 +02002817 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
2818 fib_work->mlxsw_sp = router->mlxsw_sp;
Ido Schimmel30572242016-12-03 16:45:01 +01002819 fib_work->event = event;
2820
2821 switch (event) {
Ido Schimmel599cf8f2017-02-09 10:28:44 +01002822 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
Ido Schimmel4283bce2017-02-09 10:28:43 +01002823 case FIB_EVENT_ENTRY_APPEND: /* fall through */
Ido Schimmel30572242016-12-03 16:45:01 +01002824 case FIB_EVENT_ENTRY_ADD: /* fall through */
2825 case FIB_EVENT_ENTRY_DEL:
2826 memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
2827	 /* Take reference on fib_info to prevent it from being
2828 * freed while work is queued. Release it afterwards.
2829 */
2830 fib_info_hold(fib_work->fen_info.fi);
2831 break;
Ido Schimmel5d7bfd12017-03-16 09:08:14 +01002832 case FIB_EVENT_RULE_ADD: /* fall through */
2833 case FIB_EVENT_RULE_DEL:
2834 memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
2835 fib_rule_get(fib_work->fr_info.rule);
2836 break;
Ido Schimmelad178c82017-02-08 11:16:40 +01002837 case FIB_EVENT_NH_ADD: /* fall through */
2838 case FIB_EVENT_NH_DEL:
2839 memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
2840 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
2841 break;
Ido Schimmel30572242016-12-03 16:45:01 +01002842 }
2843
Ido Schimmela0e47612017-02-06 16:20:10 +01002844 mlxsw_core_schedule_work(&fib_work->work);
Ido Schimmel30572242016-12-03 16:45:01 +01002845
Jiri Pirkob45f64d2016-09-26 12:52:31 +02002846 return NOTIFY_DONE;
2847}
2848
Ido Schimmel4724ba562017-03-10 08:53:39 +01002849static struct mlxsw_sp_rif *
2850mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
2851 const struct net_device *dev)
2852{
2853 int i;
2854
2855 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002856 if (mlxsw_sp->router->rifs[i] &&
2857 mlxsw_sp->router->rifs[i]->dev == dev)
2858 return mlxsw_sp->router->rifs[i];
Ido Schimmel4724ba562017-03-10 08:53:39 +01002859
2860 return NULL;
2861}
2862
2863static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
2864{
2865 char ritr_pl[MLXSW_REG_RITR_LEN];
2866 int err;
2867
2868 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
2869 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2870 if (WARN_ON_ONCE(err))
2871 return err;
2872
2873 mlxsw_reg_ritr_enable_set(ritr_pl, false);
2874 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2875}
2876
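/* Disable the RIF in hardware and remove the nexthops and neighbour
 * entries that were using it.
 */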
2877static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002878 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002879{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002880 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
2881 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
2882 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002883}
2884
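/* Decide whether an inetaddr event should change the RIF configuration:
 * create a RIF on the first IP address and destroy it when the last one
 * is removed, unless the netdev is enslaved to an L3 master device.
 */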
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002885static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
Ido Schimmel4724ba562017-03-10 08:53:39 +01002886 const struct in_device *in_dev,
2887 unsigned long event)
2888{
2889 switch (event) {
2890 case NETDEV_UP:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002891 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002892 return true;
2893 return false;
2894 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002895 if (rif && !in_dev->ifa_list &&
2896 !netif_is_l3_slave(rif->dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01002897 return true;
2898 /* It is possible we already removed the RIF ourselves
2899 * if it was assigned to a netdev that is now a bridge
2900 * or LAG slave.
2901 */
2902 return false;
2903 }
2904
2905 return false;
2906}
2907
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002908#define MLXSW_SP_INVALID_INDEX_RIF 0xffff
Ido Schimmel4724ba562017-03-10 08:53:39 +01002909static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2910{
2911 int i;
2912
2913 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002914 if (!mlxsw_sp->router->rifs[i])
Ido Schimmel4724ba562017-03-10 08:53:39 +01002915 return i;
2916
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002917 return MLXSW_SP_INVALID_INDEX_RIF;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002918}
2919
2920static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2921 bool *p_lagged, u16 *p_system_port)
2922{
2923 u8 local_port = mlxsw_sp_vport->local_port;
2924
2925 *p_lagged = mlxsw_sp_vport->lagged;
2926 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2927}
2928
2929static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel69132292017-03-10 08:53:42 +01002930 u16 vr_id, struct net_device *l3_dev,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002931 u16 rif_index, bool create)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002932{
2933 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2934 bool lagged = mlxsw_sp_vport->lagged;
2935 char ritr_pl[MLXSW_REG_RITR_LEN];
2936 u16 system_port;
2937
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002938 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index,
2939 vr_id, l3_dev->mtu, l3_dev->dev_addr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002940
2941 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2942 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2943 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2944
2945 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2946}
2947
Ido Schimmelce95e152017-05-26 08:37:27 +02002948static void
2949mlxsw_sp_port_vlan_rif_sp_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01002950
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002951static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002952{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002953 return MLXSW_SP_RFID_BASE + rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002954}
2955
2956static struct mlxsw_sp_fid *
2957mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2958{
2959 struct mlxsw_sp_fid *f;
2960
2961 f = kzalloc(sizeof(*f), GFP_KERNEL);
2962 if (!f)
2963 return NULL;
2964
Ido Schimmelce95e152017-05-26 08:37:27 +02002965 f->leave = mlxsw_sp_port_vlan_rif_sp_leave;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002966 f->ref_count = 0;
2967 f->dev = l3_dev;
2968 f->fid = fid;
2969
2970 return f;
2971}
2972
2973static struct mlxsw_sp_rif *
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002974mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev,
Ido Schimmel69132292017-03-10 08:53:42 +01002975 struct mlxsw_sp_fid *f)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002976{
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002977 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002978
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002979 rif = kzalloc(sizeof(*rif), GFP_KERNEL);
2980 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01002981 return NULL;
2982
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002983 INIT_LIST_HEAD(&rif->nexthop_list);
2984 INIT_LIST_HEAD(&rif->neigh_list);
2985 ether_addr_copy(rif->addr, l3_dev->dev_addr);
2986 rif->mtu = l3_dev->mtu;
2987 rif->vr_id = vr_id;
2988 rif->dev = l3_dev;
2989 rif->rif_index = rif_index;
2990 rif->f = f;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002991
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01002992 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01002993}
2994
Ido Schimmel5f9efff2017-05-16 19:38:27 +02002995struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
2996 u16 rif_index)
2997{
2998 return mlxsw_sp->router->rifs[rif_index];
2999}
3000
Arkadi Sharshevskyfd1b9d42017-03-28 17:24:16 +02003001u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
3002{
3003 return rif->rif_index;
3004}
3005
3006int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
3007{
3008 return rif->dev->ifindex;
3009}
3010
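/* Create a Sub-port RIF for a vPort: reserve a RIF index, bind a virtual
 * router according to the L3 master device (or the main table), program
 * the RIF and the router MAC FDB entry in hardware and allocate the rFID.
 * An egress counter is attached when the eRIF dpipe table has counters
 * enabled.
 */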
Ido Schimmel4724ba562017-03-10 08:53:39 +01003011static struct mlxsw_sp_rif *
3012mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
3013 struct net_device *l3_dev)
3014{
3015 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel57837882017-03-16 09:08:16 +01003016 u32 tb_id = l3mdev_fib_table(l3_dev);
Ido Schimmel69132292017-03-10 08:53:42 +01003017 struct mlxsw_sp_vr *vr;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003018 struct mlxsw_sp_fid *f;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003019 struct mlxsw_sp_rif *rif;
3020 u16 fid, rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003021 int err;
3022
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003023 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3024 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003025 return ERR_PTR(-ERANGE);
3026
Ido Schimmel57837882017-03-16 09:08:16 +01003027 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01003028 if (IS_ERR(vr))
3029 return ERR_CAST(vr);
3030
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003031 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev,
3032 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003033 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01003034 goto err_vport_rif_sp_op;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003035
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003036 fid = mlxsw_sp_rif_sp_to_fid(rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003037 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
3038 if (err)
3039 goto err_rif_fdb_op;
3040
3041 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
3042 if (!f) {
3043 err = -ENOMEM;
3044 goto err_rfid_alloc;
3045 }
3046
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003047 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3048 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01003049 err = -ENOMEM;
3050 goto err_rif_alloc;
3051 }
3052
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02003053 if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core),
3054 MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) {
3055 err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
3056 MLXSW_SP_RIF_COUNTER_EGRESS);
3057 if (err)
3058 netdev_dbg(mlxsw_sp_vport->dev,
3059				   "Counter alloc failed: err=%d\n", err);
3060 }
3061
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003062 f->rif = rif;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003063 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003064 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003065
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003066 return rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003067
3068err_rif_alloc:
3069 kfree(f);
3070err_rfid_alloc:
3071 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3072err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003073 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3074 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003075err_vport_rif_sp_op:
3076 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003077 return ERR_PTR(err);
3078}
3079
3080static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003081 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003082{
3083 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel9011b672017-05-16 19:38:25 +02003084 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003085 struct net_device *l3_dev = rif->dev;
3086 struct mlxsw_sp_fid *f = rif->f;
3087 u16 rif_index = rif->rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003088 u16 fid = f->fid;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003089
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003090 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003091
Arkadi Sharshevskye0c0afd2017-03-28 17:24:15 +02003092 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
3093 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS);
3094
Ido Schimmel69132292017-03-10 08:53:42 +01003095 vr->rif_count--;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003096 mlxsw_sp->router->rifs[rif_index] = NULL;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003097 f->rif = NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003098
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003099 kfree(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003100
3101 kfree(f);
3102
3103 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
3104
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003105 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index,
3106 false);
Ido Schimmel69132292017-03-10 08:53:42 +01003107 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003108}
3109
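/* Join a vPort to an L3 device: create a Sub-port RIF for the device if
 * one does not exist yet, disable learning on the VID, put it in the
 * forwarding state and switch the port to Virtual mode when this is its
 * first {Port, VID} mapping.
 */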
3110static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3111 struct net_device *l3_dev)
3112{
3113 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003114 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
Ido Schimmel4aafc362017-05-26 08:37:25 +02003115 struct mlxsw_sp_port *mlxsw_sp_port;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003116 struct mlxsw_sp_rif *rif;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003117 int err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003118
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003119 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3120 if (!rif) {
3121 rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
3122 if (IS_ERR(rif))
3123 return PTR_ERR(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003124 }
3125
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003126 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3127 if (err)
3128 goto err_port_vid_learning_set;
3129
3130 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid,
3131 BR_STATE_FORWARDING);
3132 if (err)
3133 goto err_port_vid_stp_set;
3134
Ido Schimmel4aafc362017-05-26 08:37:25 +02003135 mlxsw_sp_port = mlxsw_sp_vport_port(mlxsw_sp_vport);
3136 if (mlxsw_sp_port->nr_port_vid_map++ == 0) {
3137 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
3138 if (err)
3139 goto err_port_vp_mode_trans;
3140 }
3141
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003142 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f);
3143 rif->f->ref_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003144
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003145 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003146
3147 return 0;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003148
Ido Schimmel4aafc362017-05-26 08:37:25 +02003149err_port_vp_mode_trans:
3150 mlxsw_sp_port->nr_port_vid_map--;
3151 mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid, BR_STATE_BLOCKING);
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003152err_port_vid_stp_set:
3153 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3154err_port_vid_learning_set:
3155 if (rif->f->ref_count == 0)
3156 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, rif);
3157 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003158}
3159
Ido Schimmelce95e152017-05-26 08:37:27 +02003160static void
3161mlxsw_sp_port_vlan_rif_sp_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003162{
Ido Schimmelce95e152017-05-26 08:37:27 +02003163 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
3164 struct mlxsw_sp_port *mlxsw_sp_vport;
3165 u16 vid = mlxsw_sp_port_vlan->vid;
3166 struct mlxsw_sp_fid *f;
3167
3168 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3169 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003170
3171 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
3172
Ido Schimmel4aafc362017-05-26 08:37:25 +02003173 f->ref_count--;
3174 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
3175
Ido Schimmel4aafc362017-05-26 08:37:25 +02003176 if (mlxsw_sp_port->nr_port_vid_map == 1)
3177 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
3178 mlxsw_sp_port->nr_port_vid_map--;
Ido Schimmel03ea01e2017-05-23 21:56:30 +02003179 mlxsw_sp_port_vid_stp_set(mlxsw_sp_vport, vid, BR_STATE_BLOCKING);
3180 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
Ido Schimmel4aafc362017-05-26 08:37:25 +02003181
3182 if (f->ref_count == 0)
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003183 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003184}
3185
3186static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
3187 struct net_device *port_dev,
3188 unsigned long event, u16 vid)
3189{
3190 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
Ido Schimmelce95e152017-05-26 08:37:27 +02003191 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003192 struct mlxsw_sp_port *mlxsw_sp_vport;
3193
3194 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3195 if (WARN_ON(!mlxsw_sp_vport))
3196 return -EINVAL;
Ido Schimmelce95e152017-05-26 08:37:27 +02003197 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003198
3199 switch (event) {
3200 case NETDEV_UP:
3201 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
3202 case NETDEV_DOWN:
Ido Schimmelce95e152017-05-26 08:37:27 +02003203 mlxsw_sp_port_vlan_rif_sp_leave(mlxsw_sp_port_vlan);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003204 break;
3205 }
3206
3207 return 0;
3208}
3209
3210static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
3211 unsigned long event)
3212{
Jiri Pirko2b94e582017-04-18 16:55:37 +02003213 if (netif_is_bridge_port(port_dev) ||
3214 netif_is_lag_port(port_dev) ||
3215 netif_is_ovs_port(port_dev))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003216 return 0;
3217
3218 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
3219}
3220
3221static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
3222 struct net_device *lag_dev,
3223 unsigned long event, u16 vid)
3224{
3225 struct net_device *port_dev;
3226 struct list_head *iter;
3227 int err;
3228
3229 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
3230 if (mlxsw_sp_port_dev_check(port_dev)) {
3231 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
3232 event, vid);
3233 if (err)
3234 return err;
3235 }
3236 }
3237
3238 return 0;
3239}
3240
3241static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
3242 unsigned long event)
3243{
3244 if (netif_is_bridge_port(lag_dev))
3245 return 0;
3246
3247 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
3248}
3249
3250static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
3251 struct net_device *l3_dev)
3252{
3253 u16 fid;
3254
3255 if (is_vlan_dev(l3_dev))
3256 fid = vlan_dev_vlan_id(l3_dev);
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003257 else if (mlxsw_sp_master_bridge(mlxsw_sp)->dev == l3_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003258 fid = 1;
3259 else
3260 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
3261
3262 return mlxsw_sp_fid_find(mlxsw_sp, fid);
3263}
3264
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003265static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
3266{
3267 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
3268}
3269
Ido Schimmel4724ba562017-03-10 08:53:39 +01003270static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
3271{
3272 return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
3273 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3274}
3275
3276static u16 mlxsw_sp_flood_table_index_get(u16 fid)
3277{
3278 return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
3279}
3280
3281static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
3282 bool set)
3283{
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003284 u8 router_port = mlxsw_sp_router_port(mlxsw_sp);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003285 enum mlxsw_flood_table_type table_type;
3286 char *sftr_pl;
3287 u16 index;
3288 int err;
3289
3290 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
3291 if (!sftr_pl)
3292 return -ENOMEM;
3293
3294 table_type = mlxsw_sp_flood_table_type_get(fid);
3295 index = mlxsw_sp_flood_table_index_get(fid);
3296 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
Ido Schimmel5ec2ee72017-03-24 08:02:48 +01003297 1, router_port, set);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003298 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
3299
3300 kfree(sftr_pl);
3301 return err;
3302}
3303
3304static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
3305{
3306 if (mlxsw_sp_fid_is_vfid(fid))
3307 return MLXSW_REG_RITR_FID_IF;
3308 else
3309 return MLXSW_REG_RITR_VLAN_IF;
3310}
3311
Ido Schimmel69132292017-03-10 08:53:42 +01003312static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003313 struct net_device *l3_dev,
3314 u16 fid, u16 rif,
3315 bool create)
3316{
3317 enum mlxsw_reg_ritr_if_type rif_type;
3318 char ritr_pl[MLXSW_REG_RITR_LEN];
3319
3320 rif_type = mlxsw_sp_rif_type_get(fid);
Ido Schimmel69132292017-03-10 08:53:42 +01003321 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003322 l3_dev->dev_addr);
3323 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
3324
3325 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3326}
3327
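/* Create a RIF for a bridge device (FID or vFID based): reserve a RIF
 * index and a virtual router, enable flooding of broadcast traffic to the
 * router port, and program the RIF and the router MAC FDB entry.
 */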
3328static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
3329 struct net_device *l3_dev,
3330 struct mlxsw_sp_fid *f)
3331{
Ido Schimmel57837882017-03-16 09:08:16 +01003332 u32 tb_id = l3mdev_fib_table(l3_dev);
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003333 struct mlxsw_sp_rif *rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003334 struct mlxsw_sp_vr *vr;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003335 u16 rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003336 int err;
3337
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003338 rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp);
3339 if (rif_index == MLXSW_SP_INVALID_INDEX_RIF)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003340 return -ERANGE;
3341
Ido Schimmel57837882017-03-16 09:08:16 +01003342 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
Ido Schimmel69132292017-03-10 08:53:42 +01003343 if (IS_ERR(vr))
3344 return PTR_ERR(vr);
3345
Ido Schimmel4724ba562017-03-10 08:53:39 +01003346 err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
3347 if (err)
Ido Schimmel69132292017-03-10 08:53:42 +01003348 goto err_port_flood_set;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003349
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003350 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid,
3351 rif_index, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003352 if (err)
3353 goto err_rif_bridge_op;
3354
3355 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
3356 if (err)
3357 goto err_rif_fdb_op;
3358
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003359 rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f);
3360 if (!rif) {
Ido Schimmel4724ba562017-03-10 08:53:39 +01003361 err = -ENOMEM;
3362 goto err_rif_alloc;
3363 }
3364
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003365 f->rif = rif;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003366 mlxsw_sp->router->rifs[rif_index] = rif;
Ido Schimmel69132292017-03-10 08:53:42 +01003367 vr->rif_count++;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003368
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003369 netdev_dbg(l3_dev, "RIF=%d created\n", rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003370
3371 return 0;
3372
3373err_rif_alloc:
3374 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3375err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003376 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3377 false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003378err_rif_bridge_op:
3379 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
Ido Schimmel69132292017-03-10 08:53:42 +01003380err_port_flood_set:
3381 mlxsw_sp_vr_put(vr);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003382 return err;
3383}
3384
3385void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003386 struct mlxsw_sp_rif *rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003387{
Ido Schimmel9011b672017-05-16 19:38:25 +02003388 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[rif->vr_id];
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003389 struct net_device *l3_dev = rif->dev;
3390 struct mlxsw_sp_fid *f = rif->f;
3391 u16 rif_index = rif->rif_index;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003392
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003393 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003394
Ido Schimmel69132292017-03-10 08:53:42 +01003395 vr->rif_count--;
Ido Schimmel5f9efff2017-05-16 19:38:27 +02003396 mlxsw_sp->router->rifs[rif_index] = NULL;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003397 f->rif = NULL;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003398
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003399 kfree(rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003400
3401 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
3402
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003403 mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index,
3404 false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003405
3406 mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
3407
Ido Schimmel69132292017-03-10 08:53:42 +01003408 mlxsw_sp_vr_put(vr);
3409
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003410 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003411}
3412
3413static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
3414 struct net_device *br_dev,
3415 unsigned long event)
3416{
3417 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3418 struct mlxsw_sp_fid *f;
3419
3420 /* FID can either be an actual FID if the L3 device is the
3421 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3422 * L3 device is a VLAN-unaware bridge and we get a vFID.
3423 */
3424 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
3425 if (WARN_ON(!f))
3426 return -EINVAL;
3427
3428 switch (event) {
3429 case NETDEV_UP:
3430 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
3431 case NETDEV_DOWN:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003432 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003433 break;
3434 }
3435
3436 return 0;
3437}
3438
3439static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
3440 unsigned long event)
3441{
3442 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3443 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
3444 u16 vid = vlan_dev_vlan_id(vlan_dev);
3445
3446 if (mlxsw_sp_port_dev_check(real_dev))
3447 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
3448 vid);
3449 else if (netif_is_lag_master(real_dev))
3450 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
3451 vid);
3452 else if (netif_is_bridge_master(real_dev) &&
Ido Schimmel5f6935c2017-05-16 19:38:26 +02003453 mlxsw_sp_master_bridge(mlxsw_sp)->dev == real_dev)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003454 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
3455 event);
3456
3457 return 0;
3458}
3459
Ido Schimmelb1e45522017-04-30 19:47:14 +03003460static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
3461 unsigned long event)
3462{
3463 if (mlxsw_sp_port_dev_check(dev))
3464 return mlxsw_sp_inetaddr_port_event(dev, event);
3465 else if (netif_is_lag_master(dev))
3466 return mlxsw_sp_inetaddr_lag_event(dev, event);
3467 else if (netif_is_bridge_master(dev))
3468 return mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3469 else if (is_vlan_dev(dev))
3470 return mlxsw_sp_inetaddr_vlan_event(dev, event);
3471 else
3472 return 0;
3473}
3474
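/* inetaddr notifier. Creates or destroys the RIF associated with the
 * netdev on which the IPv4 address was added or deleted, if needed.
 */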
Ido Schimmel4724ba562017-03-10 08:53:39 +01003475int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
3476 unsigned long event, void *ptr)
3477{
3478 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3479 struct net_device *dev = ifa->ifa_dev->dev;
3480 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003481 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003482 int err = 0;
3483
3484 mlxsw_sp = mlxsw_sp_lower_get(dev);
3485 if (!mlxsw_sp)
3486 goto out;
3487
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003488 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3489 if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
Ido Schimmel4724ba562017-03-10 08:53:39 +01003490 goto out;
3491
Ido Schimmelb1e45522017-04-30 19:47:14 +03003492 err = __mlxsw_sp_inetaddr_event(dev, event);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003493out:
3494 return notifier_from_errno(err);
3495}
3496
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003497static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
Ido Schimmel4724ba562017-03-10 08:53:39 +01003498 const char *mac, int mtu)
3499{
3500 char ritr_pl[MLXSW_REG_RITR_LEN];
3501 int err;
3502
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003503 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003504 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3505 if (err)
3506 return err;
3507
3508 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3509 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3510 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3511 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3512}
3513
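/* Reflect a MAC or MTU change of a netdev with a RIF: remove the old
 * router MAC FDB entry, edit the RIF in hardware and install an FDB entry
 * for the new MAC.
 */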
3514int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3515{
3516 struct mlxsw_sp *mlxsw_sp;
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003517 struct mlxsw_sp_rif *rif;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003518 int err;
3519
3520 mlxsw_sp = mlxsw_sp_lower_get(dev);
3521 if (!mlxsw_sp)
3522 return 0;
3523
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003524 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3525 if (!rif)
Ido Schimmel4724ba562017-03-10 08:53:39 +01003526 return 0;
3527
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003528 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003529 if (err)
3530 return err;
3531
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003532 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
3533 dev->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003534 if (err)
3535 goto err_rif_edit;
3536
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003537 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003538 if (err)
3539 goto err_rif_fdb_op;
3540
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003541 ether_addr_copy(rif->addr, dev->dev_addr);
3542 rif->mtu = dev->mtu;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003543
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003544 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003545
3546 return 0;
3547
3548err_rif_fdb_op:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003549 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003550err_rif_edit:
Arkadi Sharshevskybf952332017-03-17 09:38:00 +01003551 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003552 return err;
3553}
3554
Ido Schimmelb1e45522017-04-30 19:47:14 +03003555static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
3556 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003557{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003558 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003559
Ido Schimmelb1e45522017-04-30 19:47:14 +03003560 /* If netdev is already associated with a RIF, then we need to
3561 * destroy it and create a new one with the new virtual router ID.
Ido Schimmel7179eb52017-03-16 09:08:18 +01003562 */
Ido Schimmelb1e45522017-04-30 19:47:14 +03003563 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3564 if (rif)
3565 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003566
Ido Schimmelb1e45522017-04-30 19:47:14 +03003567 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003568}
3569
Ido Schimmelb1e45522017-04-30 19:47:14 +03003570static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
3571 struct net_device *l3_dev)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003572{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003573 struct mlxsw_sp_rif *rif;
Ido Schimmel7179eb52017-03-16 09:08:18 +01003574
Ido Schimmelb1e45522017-04-30 19:47:14 +03003575 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
3576 if (!rif)
Ido Schimmel7179eb52017-03-16 09:08:18 +01003577 return;
Ido Schimmelb1e45522017-04-30 19:47:14 +03003578 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
Ido Schimmel7179eb52017-03-16 09:08:18 +01003579}
3580
Ido Schimmelb1e45522017-04-30 19:47:14 +03003581int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
3582 struct netdev_notifier_changeupper_info *info)
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003583{
Ido Schimmelb1e45522017-04-30 19:47:14 +03003584 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
3585 int err = 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003586
Ido Schimmelb1e45522017-04-30 19:47:14 +03003587 if (!mlxsw_sp)
3588 return 0;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003589
Ido Schimmelb1e45522017-04-30 19:47:14 +03003590 switch (event) {
3591 case NETDEV_PRECHANGEUPPER:
3592 return 0;
3593 case NETDEV_CHANGEUPPER:
3594 if (info->linking)
3595 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
3596 else
3597 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
3598 break;
3599 }
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003600
Ido Schimmelb1e45522017-04-30 19:47:14 +03003601 return err;
Ido Schimmel3d70e4582017-03-16 09:08:19 +01003602}
3603
Ido Schimmel348b8fc2017-05-16 19:38:29 +02003604static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
3605{
3606 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
3607
3608 mlxsw_sp->router->rifs = kcalloc(max_rifs,
3609 sizeof(struct mlxsw_sp_rif *),
3610 GFP_KERNEL);
3611 if (!mlxsw_sp->router->rifs)
3612 return -ENOMEM;
3613 return 0;
3614}
3615
3616static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
3617{
3618 int i;
3619
3620 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3621 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
3622
3623 kfree(mlxsw_sp->router->rifs);
3624}
3625
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003626static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
3627{
Ido Schimmel7e39d112017-05-16 19:38:28 +02003628 struct mlxsw_sp_router *router;
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003629
3630 /* Flush pending FIB notifications and then flush the device's
3631 * table before requesting another dump. The FIB notification
3632 * block is unregistered, so no need to take RTNL.
3633 */
3634 mlxsw_core_flush_owq();
Ido Schimmel7e39d112017-05-16 19:38:28 +02003635 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3636 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003637}
3638
Ido Schimmel4724ba562017-03-10 08:53:39 +01003639static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3640{
3641 char rgcr_pl[MLXSW_REG_RGCR_LEN];
3642 u64 max_rifs;
3643 int err;
3644
3645 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
3646 return -EIO;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003647 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003648
3649 mlxsw_reg_rgcr_pack(rgcr_pl, true);
3650 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
3651 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
3652 if (err)
Ido Schimmel348b8fc2017-05-16 19:38:29 +02003653 return err;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003654 return 0;
Ido Schimmel4724ba562017-03-10 08:53:39 +01003655}
3656
3657static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3658{
3659 char rgcr_pl[MLXSW_REG_RGCR_LEN];
Ido Schimmel4724ba562017-03-10 08:53:39 +01003660
3661 mlxsw_reg_rgcr_pack(rgcr_pl, false);
3662 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
Ido Schimmel4724ba562017-03-10 08:53:39 +01003663}
3664
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003665int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
3666{
Ido Schimmel9011b672017-05-16 19:38:25 +02003667 struct mlxsw_sp_router *router;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003668 int err;
3669
Ido Schimmel9011b672017-05-16 19:38:25 +02003670 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
3671 if (!router)
3672 return -ENOMEM;
3673 mlxsw_sp->router = router;
3674 router->mlxsw_sp = mlxsw_sp;
3675
3676 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003677 err = __mlxsw_sp_router_init(mlxsw_sp);
3678 if (err)
Ido Schimmel9011b672017-05-16 19:38:25 +02003679 goto err_router_init;
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003680
Ido Schimmel348b8fc2017-05-16 19:38:29 +02003681 err = mlxsw_sp_rifs_init(mlxsw_sp);
3682 if (err)
3683 goto err_rifs_init;
3684
Ido Schimmel9011b672017-05-16 19:38:25 +02003685 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003686 &mlxsw_sp_nexthop_ht_params);
3687 if (err)
3688 goto err_nexthop_ht_init;
3689
Ido Schimmel9011b672017-05-16 19:38:25 +02003690 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003691 &mlxsw_sp_nexthop_group_ht_params);
3692 if (err)
3693 goto err_nexthop_group_ht_init;
3694
Ido Schimmel8494ab02017-03-24 08:02:47 +01003695 err = mlxsw_sp_lpm_init(mlxsw_sp);
3696 if (err)
3697 goto err_lpm_init;
3698
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003699 err = mlxsw_sp_vrs_init(mlxsw_sp);
3700 if (err)
3701 goto err_vrs_init;
3702
Ido Schimmel8c9583a2016-10-27 15:12:57 +02003703 err = mlxsw_sp_neigh_init(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003704 if (err)
3705 goto err_neigh_init;
3706
Ido Schimmel7e39d112017-05-16 19:38:28 +02003707 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
3708 err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003709 mlxsw_sp_router_fib_dump_flush);
3710 if (err)
3711 goto err_register_fib_notifier;
3712
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003713 return 0;
3714
Ido Schimmelc3852ef2016-12-03 16:45:07 +01003715err_register_fib_notifier:
3716 mlxsw_sp_neigh_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003717err_neigh_init:
3718 mlxsw_sp_vrs_fini(mlxsw_sp);
3719err_vrs_init:
Ido Schimmel8494ab02017-03-24 08:02:47 +01003720 mlxsw_sp_lpm_fini(mlxsw_sp);
3721err_lpm_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003722 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
Ido Schimmele9ad5e72017-02-08 11:16:29 +01003723err_nexthop_group_ht_init:
Ido Schimmel9011b672017-05-16 19:38:25 +02003724 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmelc53b8e12017-02-08 11:16:30 +01003725err_nexthop_ht_init:
Ido Schimmel348b8fc2017-05-16 19:38:29 +02003726 mlxsw_sp_rifs_fini(mlxsw_sp);
3727err_rifs_init:
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003728 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003729err_router_init:
3730 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003731 return err;
3732}
3733
3734void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
3735{
Ido Schimmel7e39d112017-05-16 19:38:28 +02003736 unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003737 mlxsw_sp_neigh_fini(mlxsw_sp);
3738 mlxsw_sp_vrs_fini(mlxsw_sp);
Ido Schimmel8494ab02017-03-24 08:02:47 +01003739 mlxsw_sp_lpm_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003740 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
3741 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
Ido Schimmel348b8fc2017-05-16 19:38:29 +02003742 mlxsw_sp_rifs_fini(mlxsw_sp);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003743 __mlxsw_sp_router_fini(mlxsw_sp);
Ido Schimmel9011b672017-05-16 19:38:25 +02003744 kfree(mlxsw_sp->router);
Jiri Pirkob45f64d2016-09-26 12:52:31 +02003745}